applied-ai-018 committed on
Commit b84f27a · verified · 1 Parent(s): 95c8219

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
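
The commit message above notes the files were added with the upload-large-folder tool. For context, a minimal sketch of that style of upload through huggingface_hub follows; the repo id, folder path, and repo type are placeholders (none of them appear on this page), and HfApi.upload_large_folder is assumed to be available in the installed huggingface_hub version.

# Hypothetical sketch, not the exact command used for this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="your-org/your-repo",  # placeholder
    folder_path="./local-folder",  # placeholder: directory holding ckpts/, venv/, ...
    repo_type="model",             # assumption: the repository type is not stated here
)

Large files such as the .pt checkpoints below are then tracked through Git LFS rather than stored directly in the repository.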
Files changed (50)
  1. ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  7. lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/logs/debug-internal.log +183 -0
  8. venv/lib/python3.10/site-packages/transformers/models/auto/__init__.py +403 -0
  9. venv/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py +984 -0
  10. venv/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py +396 -0
  11. venv/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py +1705 -0
  12. venv/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py +382 -0
  13. venv/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py +721 -0
  14. venv/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py +358 -0
  15. venv/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py +936 -0
  16. venv/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py +117 -0
  17. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py +170 -0
  25. venv/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py +60 -0
  26. venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py +1434 -0
  27. venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py +1656 -0
  28. venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py +503 -0
  29. venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py +169 -0
  30. venv/lib/python3.10/site-packages/transformers/models/mistral/__init__.py +82 -0
  31. venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/configuration_mistral.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/convert_mistral_weights_to_hf.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_flax_mistral.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_mistral.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/mistral/configuration_mistral.py +150 -0
  37. venv/lib/python3.10/site-packages/transformers/models/mistral/convert_mistral_weights_to_hf.py +276 -0
  38. venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_flax_mistral.py +741 -0
  39. venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_mistral.py +1387 -0
  40. venv/lib/python3.10/site-packages/transformers/models/roberta/__init__.py +164 -0
  41. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/configuration_roberta.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta_fast.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/roberta/configuration_roberta.py +154 -0
  50. venv/lib/python3.10/site-packages/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py +178 -0
ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65404354c391879d0e09bf57e328fa8f9818e63acd3e66d1fac8b05c273b41d4
+ size 33555612
ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cc604eac633693be1e52ff5a0e110ba47bce0f325db59a591b898da11c0f8a7
+ size 33555627
ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff7ad7d0aefcb10d0fdd242fd7d979a4c0c50f990fb968c2e403b7410588b0f0
+ size 33555533
ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c10e9aec535e1b5c70d83f837c268cf1ffb619c95caa887b26009464308d3bef
+ size 50332843
ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:987b036cd0ddf213add9c3941dd71cd8ab78faa724b796e99cbd630d4a0443f3
+ size 50332749
ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60aa675e598d8c5d825f2700d6b0a3841d6db7410af126179fcd26a7b403d81c
+ size 33555533
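
Each checkpoint entry above is stored as a Git LFS pointer rather than the tensor data itself: a three-line stub carrying the spec version, the SHA-256 of the real object, and its size in bytes. A minimal sketch for reading such a pointer (parse_lfs_pointer is an illustrative helper, not code from this repository):

# Parse a Git LFS pointer file like the ones added above.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # Expected keys: "version", "oid" (e.g. "sha256:<hex>"), "size" (bytes)
    return fields

pointer = parse_lfs_pointer(
    "ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/fp32.pt"
)
print(pointer["oid"], int(pointer["size"]))

Once git lfs pull has replaced the pointers with the actual objects, these .pt files can be loaded normally with torch.load.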
lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
1
+ 2024-05-22 18:43:16,494 INFO StreamThr :1063 [internal.py:wandb_internal():85] W&B internal server running at pid: 1063, started at: 2024-05-22 18:43:16.492393
2
+ 2024-05-22 18:43:16,499 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-22 18:43:16,499 INFO WriterThread:1063 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb
4
+ 2024-05-22 18:43:16,502 DEBUG SenderThread:1063 [sender.py:send():378] send: header
5
+ 2024-05-22 18:43:16,505 DEBUG SenderThread:1063 [sender.py:send():378] send: run
6
+ 2024-05-22 18:43:16,841 INFO SenderThread:1063 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files
7
+ 2024-05-22 18:43:16,841 INFO SenderThread:1063 [sender.py:_start_run_threads():1123] run started: 86p21jxx with start time 1716403396.49225
8
+ 2024-05-22 18:43:16,846 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-22 18:43:16,846 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-22 18:43:16,962 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-22 18:43:16,964 DEBUG HandlerThread:1063 [system_info.py:__init__():26] System info init
12
+ 2024-05-22 18:43:16,964 DEBUG HandlerThread:1063 [system_info.py:__init__():41] System info init done
13
+ 2024-05-22 18:43:16,964 INFO HandlerThread:1063 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-22 18:43:16,964 INFO SystemMonitor:1063 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-22 18:43:16,964 INFO HandlerThread:1063 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-22 18:43:16,971 INFO SystemMonitor:1063 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-22 18:43:16,971 INFO SystemMonitor:1063 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-22 18:43:16,978 INFO SystemMonitor:1063 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-22 18:43:16,979 INFO SystemMonitor:1063 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-22 18:43:17,073 DEBUG HandlerThread:1063 [system_info.py:probe():150] Probing system
21
+ 2024-05-22 18:43:17,077 DEBUG HandlerThread:1063 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-22 18:43:17,086 ERROR HandlerThread:1063 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-22 18:43:17,086 DEBUG HandlerThread:1063 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-22 18:43:17,086 DEBUG HandlerThread:1063 [system_info.py:probe():198] Probing system done
30
+ 2024-05-22 18:43:17,086 DEBUG HandlerThread:1063 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:43:17.073766', 'startedAt': '2024-05-22T18:43:16.472541', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4363875000004, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63845443725586}}, 'memory': {'total': 1007.4379997253418}}
31
+ 2024-05-22 18:43:17,087 INFO HandlerThread:1063 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-22 18:43:17,087 INFO HandlerThread:1063 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-22 18:43:17,090 INFO HandlerThread:1063 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-22 18:43:17,095 DEBUG SenderThread:1063 [sender.py:send():378] send: files
35
+ 2024-05-22 18:43:17,095 INFO SenderThread:1063 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-22 18:43:17,292 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-22 18:43:17,292 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-22 18:43:17,293 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-22 18:43:17,294 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-22 18:43:17,410 DEBUG SenderThread:1063 [sender.py:send():378] send: telemetry
41
+ 2024-05-22 18:43:17,651 INFO wandb-upload_0:1063 [upload_job.py:push():130] Uploaded file /tmp/tmpenbd99b_wandb/78j0rr5j-wandb-metadata.json
42
+ 2024-05-22 18:43:17,843 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
43
+ 2024-05-22 18:43:17,844 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-metadata.json
44
+ 2024-05-22 18:43:17,844 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/requirements.txt
45
+ 2024-05-22 18:43:19,843 INFO Thread-12 :1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
46
+ 2024-05-22 18:43:22,415 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-22 18:43:27,819 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status_report
48
+ 2024-05-22 18:43:27,851 INFO Thread-12 :1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
49
+ 2024-05-22 18:43:28,335 DEBUG SenderThread:1063 [sender.py:send():378] send: exit
50
+ 2024-05-22 18:43:28,335 INFO SenderThread:1063 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-22 18:43:28,336 INFO SenderThread:1063 [sender.py:send_exit():587] handling runtime: 11
52
+ 2024-05-22 18:43:28,337 INFO SenderThread:1063 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-22 18:43:28,337 INFO SenderThread:1063 [sender.py:send_exit():593] send defer
54
+ 2024-05-22 18:43:28,337 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-22 18:43:28,337 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-22 18:43:28,337 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-22 18:43:28,338 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-22 18:43:28,338 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-22 18:43:28,338 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-22 18:43:28,338 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-22 18:43:28,338 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-22 18:43:28,338 INFO HandlerThread:1063 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-22 18:43:28,338 DEBUG SystemMonitor:1063 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-22 18:43:28,338 DEBUG SystemMonitor:1063 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-22 18:43:28,338 DEBUG SystemMonitor:1063 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined cpu monitor
71
+ 2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-22 18:43:28,340 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-22 18:43:28,340 DEBUG SenderThread:1063 [sender.py:send():378] send: stats
78
+ 2024-05-22 18:43:28,340 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
79
+ 2024-05-22 18:43:28,340 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 3
80
+ 2024-05-22 18:43:28,340 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
81
+ 2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 3
82
+ 2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 4
83
+ 2024-05-22 18:43:28,340 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
84
+ 2024-05-22 18:43:28,340 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 4
85
+ 2024-05-22 18:43:28,341 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
86
+ 2024-05-22 18:43:28,341 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 4
87
+ 2024-05-22 18:43:28,341 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 5
88
+ 2024-05-22 18:43:28,341 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
89
+ 2024-05-22 18:43:28,341 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 5
90
+ 2024-05-22 18:43:28,341 DEBUG SenderThread:1063 [sender.py:send():378] send: summary
91
+ 2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
92
+ 2024-05-22 18:43:28,342 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
93
+ 2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 5
94
+ 2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 6
95
+ 2024-05-22 18:43:28,342 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
96
+ 2024-05-22 18:43:28,342 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 6
97
+ 2024-05-22 18:43:28,342 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
98
+ 2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 6
99
+ 2024-05-22 18:43:28,346 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status_report
100
+ 2024-05-22 18:43:28,428 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 7
101
+ 2024-05-22 18:43:28,428 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
102
+ 2024-05-22 18:43:28,428 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 7
103
+ 2024-05-22 18:43:28,428 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
104
+ 2024-05-22 18:43:28,428 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 7
105
+ 2024-05-22 18:43:28,853 INFO Thread-12 :1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/config.yaml
106
+ 2024-05-22 18:43:28,853 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-summary.json
107
+ 2024-05-22 18:43:29,335 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
108
+ 2024-05-22 18:43:29,438 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 8
109
+ 2024-05-22 18:43:29,438 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
110
+ 2024-05-22 18:43:29,438 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
111
+ 2024-05-22 18:43:29,438 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 8
112
+ 2024-05-22 18:43:29,438 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
113
+ 2024-05-22 18:43:29,438 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 8
114
+ 2024-05-22 18:43:29,438 INFO SenderThread:1063 [job_builder.py:build():432] Attempting to build job artifact
115
+ 2024-05-22 18:43:29,439 INFO SenderThread:1063 [job_builder.py:_get_source_type():576] no source found
116
+ 2024-05-22 18:43:29,439 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 9
117
+ 2024-05-22 18:43:29,439 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
118
+ 2024-05-22 18:43:29,439 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 9
119
+ 2024-05-22 18:43:29,439 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
120
+ 2024-05-22 18:43:29,439 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 9
121
+ 2024-05-22 18:43:29,439 INFO SenderThread:1063 [dir_watcher.py:finish():358] shutting down directory watcher
122
+ 2024-05-22 18:43:29,854 INFO SenderThread:1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
123
+ 2024-05-22 18:43:29,854 INFO SenderThread:1063 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files
124
+ 2024-05-22 18:43:29,855 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-metadata.json wandb-metadata.json
125
+ 2024-05-22 18:43:29,855 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/requirements.txt requirements.txt
126
+ 2024-05-22 18:43:29,855 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-summary.json wandb-summary.json
127
+ 2024-05-22 18:43:29,857 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/config.yaml config.yaml
128
+ 2024-05-22 18:43:29,859 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log output.log
129
+ 2024-05-22 18:43:29,859 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 10
130
+ 2024-05-22 18:43:29,860 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
131
+ 2024-05-22 18:43:29,860 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 10
132
+ 2024-05-22 18:43:29,860 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
133
+ 2024-05-22 18:43:29,860 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 10
134
+ 2024-05-22 18:43:29,860 INFO SenderThread:1063 [file_pusher.py:finish():169] shutting down file pusher
135
+ 2024-05-22 18:43:30,112 INFO wandb-upload_0:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/requirements.txt
136
+ 2024-05-22 18:43:30,335 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
137
+ 2024-05-22 18:43:30,336 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
138
+ 2024-05-22 18:43:30,442 INFO wandb-upload_2:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/config.yaml
139
+ 2024-05-22 18:43:30,474 INFO wandb-upload_3:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
140
+ 2024-05-22 18:43:30,487 INFO wandb-upload_1:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-summary.json
141
+ 2024-05-22 18:43:30,687 INFO Thread-11 (_thread_body):1063 [sender.py:transition_state():613] send defer: 11
142
+ 2024-05-22 18:43:30,687 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
143
+ 2024-05-22 18:43:30,687 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 11
144
+ 2024-05-22 18:43:30,688 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
145
+ 2024-05-22 18:43:30,688 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 11
146
+ 2024-05-22 18:43:30,688 INFO SenderThread:1063 [file_pusher.py:join():175] waiting for file pusher
147
+ 2024-05-22 18:43:30,688 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 12
148
+ 2024-05-22 18:43:30,688 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
149
+ 2024-05-22 18:43:30,688 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 12
150
+ 2024-05-22 18:43:30,688 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
151
+ 2024-05-22 18:43:30,688 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 12
152
+ 2024-05-22 18:43:30,688 INFO SenderThread:1063 [file_stream.py:finish():601] file stream finish called
153
+ 2024-05-22 18:43:30,748 INFO SenderThread:1063 [file_stream.py:finish():605] file stream finish is done
154
+ 2024-05-22 18:43:30,748 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 13
155
+ 2024-05-22 18:43:30,748 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
156
+ 2024-05-22 18:43:30,748 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 13
157
+ 2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
158
+ 2024-05-22 18:43:30,749 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 13
159
+ 2024-05-22 18:43:30,749 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 14
160
+ 2024-05-22 18:43:30,749 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
161
+ 2024-05-22 18:43:30,749 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 14
162
+ 2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send():378] send: final
163
+ 2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send():378] send: footer
164
+ 2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
165
+ 2024-05-22 18:43:30,749 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 14
166
+ 2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
167
+ 2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
168
+ 2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: server_info
169
+ 2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: get_summary
170
+ 2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: sampled_history
171
+ 2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: internal_messages
172
+ 2024-05-22 18:43:30,751 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
173
+ 2024-05-22 18:43:30,751 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
174
+ 2024-05-22 18:43:30,751 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: server_info
175
+ 2024-05-22 18:43:30,813 INFO MainThread:1063 [wandb_run.py:_footer_history_summary_info():3994] rendering history
176
+ 2024-05-22 18:43:30,813 INFO MainThread:1063 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
177
+ 2024-05-22 18:43:30,813 INFO MainThread:1063 [wandb_run.py:_footer_sync_info():3953] logging synced files
178
+ 2024-05-22 18:43:30,814 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: shutdown
179
+ 2024-05-22 18:43:30,814 INFO HandlerThread:1063 [handler.py:finish():882] shutting down handler
180
+ 2024-05-22 18:43:31,751 INFO WriterThread:1063 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb
181
+ 2024-05-22 18:43:31,813 INFO SenderThread:1063 [sender.py:finish():1545] shutting down sender
182
+ 2024-05-22 18:43:31,813 INFO SenderThread:1063 [file_pusher.py:finish():169] shutting down file pusher
183
+ 2024-05-22 18:43:31,813 INFO SenderThread:1063 [file_pusher.py:join():175] waiting for file pusher
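
The debug log above captures the evaluation setup in its metadata: the run was launched as -m lm_eval.__main__ with the args tuple recorded on the system-probe line, hit a git "dubious ownership" warning (the suggested git config --global --add safe.directory fix is quoted in the log), and exited with code 1 after roughly 11 seconds. A small sketch that rebuilds the command line from those logged fields:

# Reconstruct the lm-evaluation-harness invocation from the 'program' and
# 'args' values recorded in the wandb metadata above.
import shlex

program = "-m lm_eval.__main__"
args = [
    "--model", "hf",
    "--model_args",
    "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000",
    "--tasks", "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
    "--batch_size", "auto",
    "--wandb_args", "project=bharatgpt,group=trial_expt_2",
]
print("python " + program + " " + shlex.join(args))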
venv/lib/python3.10/site-packages/transformers/models/auto/__init__.py ADDED
@@ -0,0 +1,403 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "auto_factory": ["get_values"],
28
+ "configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"],
29
+ "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"],
30
+ "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"],
31
+ "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"],
32
+ "tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_torch_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_auto"] = [
42
+ "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
43
+ "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING",
44
+ "MODEL_FOR_AUDIO_XVECTOR_MAPPING",
45
+ "MODEL_FOR_BACKBONE_MAPPING",
46
+ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
47
+ "MODEL_FOR_CAUSAL_LM_MAPPING",
48
+ "MODEL_FOR_CTC_MAPPING",
49
+ "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
50
+ "MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
51
+ "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
52
+ "MODEL_FOR_IMAGE_MAPPING",
53
+ "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
54
+ "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING",
55
+ "MODEL_FOR_KEYPOINT_DETECTION_MAPPING",
56
+ "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
57
+ "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
58
+ "MODEL_FOR_MASKED_LM_MAPPING",
59
+ "MODEL_FOR_MASK_GENERATION_MAPPING",
60
+ "MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
61
+ "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
62
+ "MODEL_FOR_OBJECT_DETECTION_MAPPING",
63
+ "MODEL_FOR_PRETRAINING_MAPPING",
64
+ "MODEL_FOR_QUESTION_ANSWERING_MAPPING",
65
+ "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
66
+ "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
67
+ "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
68
+ "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
69
+ "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
70
+ "MODEL_FOR_TEXT_ENCODING_MAPPING",
71
+ "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING",
72
+ "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING",
73
+ "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
74
+ "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING",
75
+ "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING",
76
+ "MODEL_FOR_VISION_2_SEQ_MAPPING",
77
+ "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
78
+ "MODEL_MAPPING",
79
+ "MODEL_WITH_LM_HEAD_MAPPING",
80
+ "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
81
+ "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
82
+ "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING",
83
+ "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING",
84
+ "AutoModel",
85
+ "AutoBackbone",
86
+ "AutoModelForAudioClassification",
87
+ "AutoModelForAudioFrameClassification",
88
+ "AutoModelForAudioXVector",
89
+ "AutoModelForCausalLM",
90
+ "AutoModelForCTC",
91
+ "AutoModelForDepthEstimation",
92
+ "AutoModelForImageClassification",
93
+ "AutoModelForImageSegmentation",
94
+ "AutoModelForImageToImage",
95
+ "AutoModelForInstanceSegmentation",
96
+ "AutoModelForKeypointDetection",
97
+ "AutoModelForMaskGeneration",
98
+ "AutoModelForTextEncoding",
99
+ "AutoModelForMaskedImageModeling",
100
+ "AutoModelForMaskedLM",
101
+ "AutoModelForMultipleChoice",
102
+ "AutoModelForNextSentencePrediction",
103
+ "AutoModelForObjectDetection",
104
+ "AutoModelForPreTraining",
105
+ "AutoModelForQuestionAnswering",
106
+ "AutoModelForSemanticSegmentation",
107
+ "AutoModelForSeq2SeqLM",
108
+ "AutoModelForSequenceClassification",
109
+ "AutoModelForSpeechSeq2Seq",
110
+ "AutoModelForTableQuestionAnswering",
111
+ "AutoModelForTextToSpectrogram",
112
+ "AutoModelForTextToWaveform",
113
+ "AutoModelForTokenClassification",
114
+ "AutoModelForUniversalSegmentation",
115
+ "AutoModelForVideoClassification",
116
+ "AutoModelForVision2Seq",
117
+ "AutoModelForVisualQuestionAnswering",
118
+ "AutoModelForDocumentQuestionAnswering",
119
+ "AutoModelWithLMHead",
120
+ "AutoModelForZeroShotImageClassification",
121
+ "AutoModelForZeroShotObjectDetection",
122
+ ]
123
+
124
+ try:
125
+ if not is_tf_available():
126
+ raise OptionalDependencyNotAvailable()
127
+ except OptionalDependencyNotAvailable:
128
+ pass
129
+ else:
130
+ _import_structure["modeling_tf_auto"] = [
131
+ "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
132
+ "TF_MODEL_FOR_CAUSAL_LM_MAPPING",
133
+ "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
134
+ "TF_MODEL_FOR_MASK_GENERATION_MAPPING",
135
+ "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
136
+ "TF_MODEL_FOR_MASKED_LM_MAPPING",
137
+ "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
138
+ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
139
+ "TF_MODEL_FOR_PRETRAINING_MAPPING",
140
+ "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
141
+ "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
142
+ "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
143
+ "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
144
+ "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
145
+ "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
146
+ "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
147
+ "TF_MODEL_FOR_TEXT_ENCODING_MAPPING",
148
+ "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
149
+ "TF_MODEL_FOR_VISION_2_SEQ_MAPPING",
150
+ "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
151
+ "TF_MODEL_MAPPING",
152
+ "TF_MODEL_WITH_LM_HEAD_MAPPING",
153
+ "TFAutoModel",
154
+ "TFAutoModelForAudioClassification",
155
+ "TFAutoModelForCausalLM",
156
+ "TFAutoModelForImageClassification",
157
+ "TFAutoModelForMaskedImageModeling",
158
+ "TFAutoModelForMaskedLM",
159
+ "TFAutoModelForMaskGeneration",
160
+ "TFAutoModelForMultipleChoice",
161
+ "TFAutoModelForNextSentencePrediction",
162
+ "TFAutoModelForPreTraining",
163
+ "TFAutoModelForDocumentQuestionAnswering",
164
+ "TFAutoModelForQuestionAnswering",
165
+ "TFAutoModelForSemanticSegmentation",
166
+ "TFAutoModelForSeq2SeqLM",
167
+ "TFAutoModelForSequenceClassification",
168
+ "TFAutoModelForSpeechSeq2Seq",
169
+ "TFAutoModelForTableQuestionAnswering",
170
+ "TFAutoModelForTextEncoding",
171
+ "TFAutoModelForTokenClassification",
172
+ "TFAutoModelForVision2Seq",
173
+ "TFAutoModelForZeroShotImageClassification",
174
+ "TFAutoModelWithLMHead",
175
+ ]
176
+
177
+ try:
178
+ if not is_flax_available():
179
+ raise OptionalDependencyNotAvailable()
180
+ except OptionalDependencyNotAvailable:
181
+ pass
182
+ else:
183
+ _import_structure["modeling_flax_auto"] = [
184
+ "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
185
+ "FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
186
+ "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
187
+ "FLAX_MODEL_FOR_MASKED_LM_MAPPING",
188
+ "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
189
+ "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
190
+ "FLAX_MODEL_FOR_PRETRAINING_MAPPING",
191
+ "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
192
+ "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
193
+ "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
194
+ "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
195
+ "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
196
+ "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING",
197
+ "FLAX_MODEL_MAPPING",
198
+ "FlaxAutoModel",
199
+ "FlaxAutoModelForCausalLM",
200
+ "FlaxAutoModelForImageClassification",
201
+ "FlaxAutoModelForMaskedLM",
202
+ "FlaxAutoModelForMultipleChoice",
203
+ "FlaxAutoModelForNextSentencePrediction",
204
+ "FlaxAutoModelForPreTraining",
205
+ "FlaxAutoModelForQuestionAnswering",
206
+ "FlaxAutoModelForSeq2SeqLM",
207
+ "FlaxAutoModelForSequenceClassification",
208
+ "FlaxAutoModelForSpeechSeq2Seq",
209
+ "FlaxAutoModelForTokenClassification",
210
+ "FlaxAutoModelForVision2Seq",
211
+ ]
212
+
213
+
214
+ if TYPE_CHECKING:
215
+ from .auto_factory import get_values
216
+ from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig
217
+ from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
218
+ from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
219
+ from .processing_auto import PROCESSOR_MAPPING, AutoProcessor
220
+ from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
221
+
222
+ try:
223
+ if not is_torch_available():
224
+ raise OptionalDependencyNotAvailable()
225
+ except OptionalDependencyNotAvailable:
226
+ pass
227
+ else:
228
+ from .modeling_auto import (
229
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
230
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING,
231
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING,
232
+ MODEL_FOR_BACKBONE_MAPPING,
233
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
234
+ MODEL_FOR_CAUSAL_LM_MAPPING,
235
+ MODEL_FOR_CTC_MAPPING,
236
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING,
237
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
238
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
239
+ MODEL_FOR_IMAGE_MAPPING,
240
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
241
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING,
242
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
243
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING,
244
+ MODEL_FOR_MASK_GENERATION_MAPPING,
245
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
246
+ MODEL_FOR_MASKED_LM_MAPPING,
247
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
248
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
249
+ MODEL_FOR_OBJECT_DETECTION_MAPPING,
250
+ MODEL_FOR_PRETRAINING_MAPPING,
251
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING,
252
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
253
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
254
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
255
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
256
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
257
+ MODEL_FOR_TEXT_ENCODING_MAPPING,
258
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING,
259
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING,
260
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING,
261
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING,
262
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
263
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING,
264
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
265
+ MODEL_FOR_VISION_2_SEQ_MAPPING,
266
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
267
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
268
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING,
269
+ MODEL_MAPPING,
270
+ MODEL_WITH_LM_HEAD_MAPPING,
271
+ AutoBackbone,
272
+ AutoModel,
273
+ AutoModelForAudioClassification,
274
+ AutoModelForAudioFrameClassification,
275
+ AutoModelForAudioXVector,
276
+ AutoModelForCausalLM,
277
+ AutoModelForCTC,
278
+ AutoModelForDepthEstimation,
279
+ AutoModelForDocumentQuestionAnswering,
280
+ AutoModelForImageClassification,
281
+ AutoModelForImageSegmentation,
282
+ AutoModelForImageToImage,
283
+ AutoModelForInstanceSegmentation,
284
+ AutoModelForKeypointDetection,
285
+ AutoModelForMaskedImageModeling,
286
+ AutoModelForMaskedLM,
287
+ AutoModelForMaskGeneration,
288
+ AutoModelForMultipleChoice,
289
+ AutoModelForNextSentencePrediction,
290
+ AutoModelForObjectDetection,
291
+ AutoModelForPreTraining,
292
+ AutoModelForQuestionAnswering,
293
+ AutoModelForSemanticSegmentation,
294
+ AutoModelForSeq2SeqLM,
295
+ AutoModelForSequenceClassification,
296
+ AutoModelForSpeechSeq2Seq,
297
+ AutoModelForTableQuestionAnswering,
298
+ AutoModelForTextEncoding,
299
+ AutoModelForTextToSpectrogram,
300
+ AutoModelForTextToWaveform,
301
+ AutoModelForTokenClassification,
302
+ AutoModelForUniversalSegmentation,
303
+ AutoModelForVideoClassification,
304
+ AutoModelForVision2Seq,
305
+ AutoModelForVisualQuestionAnswering,
306
+ AutoModelForZeroShotImageClassification,
307
+ AutoModelForZeroShotObjectDetection,
308
+ AutoModelWithLMHead,
309
+ )
310
+
311
+ try:
312
+ if not is_tf_available():
313
+ raise OptionalDependencyNotAvailable()
314
+ except OptionalDependencyNotAvailable:
315
+ pass
316
+ else:
317
+ from .modeling_tf_auto import (
318
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
319
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING,
320
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
321
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
322
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING,
323
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
324
+ TF_MODEL_FOR_MASKED_LM_MAPPING,
325
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
326
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
327
+ TF_MODEL_FOR_PRETRAINING_MAPPING,
328
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
329
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
330
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
331
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
332
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
333
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
334
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING,
335
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
336
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
337
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
338
+ TF_MODEL_MAPPING,
339
+ TF_MODEL_WITH_LM_HEAD_MAPPING,
340
+ TFAutoModel,
341
+ TFAutoModelForAudioClassification,
342
+ TFAutoModelForCausalLM,
343
+ TFAutoModelForDocumentQuestionAnswering,
344
+ TFAutoModelForImageClassification,
345
+ TFAutoModelForMaskedImageModeling,
346
+ TFAutoModelForMaskedLM,
347
+ TFAutoModelForMaskGeneration,
348
+ TFAutoModelForMultipleChoice,
349
+ TFAutoModelForNextSentencePrediction,
350
+ TFAutoModelForPreTraining,
351
+ TFAutoModelForQuestionAnswering,
352
+ TFAutoModelForSemanticSegmentation,
353
+ TFAutoModelForSeq2SeqLM,
354
+ TFAutoModelForSequenceClassification,
355
+ TFAutoModelForSpeechSeq2Seq,
356
+ TFAutoModelForTableQuestionAnswering,
357
+ TFAutoModelForTextEncoding,
358
+ TFAutoModelForTokenClassification,
359
+ TFAutoModelForVision2Seq,
360
+ TFAutoModelForZeroShotImageClassification,
361
+ TFAutoModelWithLMHead,
362
+ )
363
+
364
+ try:
365
+ if not is_flax_available():
366
+ raise OptionalDependencyNotAvailable()
367
+ except OptionalDependencyNotAvailable:
368
+ pass
369
+ else:
370
+ from .modeling_flax_auto import (
371
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
372
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
373
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
374
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING,
375
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
376
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
377
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING,
378
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
379
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
380
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
381
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
382
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
383
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
384
+ FLAX_MODEL_MAPPING,
385
+ FlaxAutoModel,
386
+ FlaxAutoModelForCausalLM,
387
+ FlaxAutoModelForImageClassification,
388
+ FlaxAutoModelForMaskedLM,
389
+ FlaxAutoModelForMultipleChoice,
390
+ FlaxAutoModelForNextSentencePrediction,
391
+ FlaxAutoModelForPreTraining,
392
+ FlaxAutoModelForQuestionAnswering,
393
+ FlaxAutoModelForSeq2SeqLM,
394
+ FlaxAutoModelForSequenceClassification,
395
+ FlaxAutoModelForSpeechSeq2Seq,
396
+ FlaxAutoModelForTokenClassification,
397
+ FlaxAutoModelForVision2Seq,
398
+ )
399
+
400
+ else:
401
+ import sys
402
+
403
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
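
The __init__.py above only declares an _import_structure dictionary and hands it to _LazyModule, so the torch, TensorFlow, and Flax modeling modules it lists are not imported until one of their symbols is first accessed. A minimal usage sketch (assuming transformers is installed; "gpt2" is just an example checkpoint):

# These imports resolve through the lazy structure above; the underlying
# configuration/tokenization modules are loaded on first access.
from transformers import AutoConfig, AutoTokenizer

config = AutoConfig.from_pretrained("gpt2")        # downloads/reads config.json
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # picks the matching tokenizer class
print(type(config).__name__, type(tokenizer).__name__)  # GPT2Config GPT2TokenizerFast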
venv/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py ADDED
@@ -0,0 +1,984 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Config class."""
16
+ import importlib
17
+ import os
18
+ import re
19
+ import warnings
20
+ from collections import OrderedDict
21
+ from typing import List, Union
22
+
23
+ from ...configuration_utils import PretrainedConfig
24
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
25
+ from ...utils import CONFIG_NAME, logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import CONFIG_ARCHIVE_MAP_MAPPING_NAMES # noqa: F401, E402
32
+
33
+
34
+ CONFIG_MAPPING_NAMES = OrderedDict(
35
+ [
36
+ # Add configs here
37
+ ("albert", "AlbertConfig"),
38
+ ("align", "AlignConfig"),
39
+ ("altclip", "AltCLIPConfig"),
40
+ ("audio-spectrogram-transformer", "ASTConfig"),
41
+ ("autoformer", "AutoformerConfig"),
42
+ ("bark", "BarkConfig"),
43
+ ("bart", "BartConfig"),
44
+ ("beit", "BeitConfig"),
45
+ ("bert", "BertConfig"),
46
+ ("bert-generation", "BertGenerationConfig"),
47
+ ("big_bird", "BigBirdConfig"),
48
+ ("bigbird_pegasus", "BigBirdPegasusConfig"),
49
+ ("biogpt", "BioGptConfig"),
50
+ ("bit", "BitConfig"),
51
+ ("blenderbot", "BlenderbotConfig"),
52
+ ("blenderbot-small", "BlenderbotSmallConfig"),
53
+ ("blip", "BlipConfig"),
54
+ ("blip-2", "Blip2Config"),
55
+ ("bloom", "BloomConfig"),
56
+ ("bridgetower", "BridgeTowerConfig"),
57
+ ("bros", "BrosConfig"),
58
+ ("camembert", "CamembertConfig"),
59
+ ("canine", "CanineConfig"),
60
+ ("chinese_clip", "ChineseCLIPConfig"),
61
+ ("chinese_clip_vision_model", "ChineseCLIPVisionConfig"),
62
+ ("clap", "ClapConfig"),
63
+ ("clip", "CLIPConfig"),
64
+ ("clip_vision_model", "CLIPVisionConfig"),
65
+ ("clipseg", "CLIPSegConfig"),
66
+ ("clvp", "ClvpConfig"),
67
+ ("code_llama", "LlamaConfig"),
68
+ ("codegen", "CodeGenConfig"),
69
+ ("cohere", "CohereConfig"),
70
+ ("conditional_detr", "ConditionalDetrConfig"),
71
+ ("convbert", "ConvBertConfig"),
72
+ ("convnext", "ConvNextConfig"),
73
+ ("convnextv2", "ConvNextV2Config"),
74
+ ("cpmant", "CpmAntConfig"),
75
+ ("ctrl", "CTRLConfig"),
76
+ ("cvt", "CvtConfig"),
77
+ ("data2vec-audio", "Data2VecAudioConfig"),
78
+ ("data2vec-text", "Data2VecTextConfig"),
79
+ ("data2vec-vision", "Data2VecVisionConfig"),
80
+ ("dbrx", "DbrxConfig"),
81
+ ("deberta", "DebertaConfig"),
82
+ ("deberta-v2", "DebertaV2Config"),
83
+ ("decision_transformer", "DecisionTransformerConfig"),
84
+ ("deformable_detr", "DeformableDetrConfig"),
85
+ ("deit", "DeiTConfig"),
86
+ ("depth_anything", "DepthAnythingConfig"),
87
+ ("deta", "DetaConfig"),
88
+ ("detr", "DetrConfig"),
89
+ ("dinat", "DinatConfig"),
90
+ ("dinov2", "Dinov2Config"),
91
+ ("distilbert", "DistilBertConfig"),
92
+ ("donut-swin", "DonutSwinConfig"),
93
+ ("dpr", "DPRConfig"),
94
+ ("dpt", "DPTConfig"),
95
+ ("efficientformer", "EfficientFormerConfig"),
96
+ ("efficientnet", "EfficientNetConfig"),
97
+ ("electra", "ElectraConfig"),
98
+ ("encodec", "EncodecConfig"),
99
+ ("encoder-decoder", "EncoderDecoderConfig"),
100
+ ("ernie", "ErnieConfig"),
101
+ ("ernie_m", "ErnieMConfig"),
102
+ ("esm", "EsmConfig"),
103
+ ("falcon", "FalconConfig"),
104
+ ("fastspeech2_conformer", "FastSpeech2ConformerConfig"),
105
+ ("flaubert", "FlaubertConfig"),
106
+ ("flava", "FlavaConfig"),
107
+ ("fnet", "FNetConfig"),
108
+ ("focalnet", "FocalNetConfig"),
109
+ ("fsmt", "FSMTConfig"),
110
+ ("funnel", "FunnelConfig"),
111
+ ("fuyu", "FuyuConfig"),
112
+ ("gemma", "GemmaConfig"),
113
+ ("git", "GitConfig"),
114
+ ("glpn", "GLPNConfig"),
115
+ ("gpt-sw3", "GPT2Config"),
116
+ ("gpt2", "GPT2Config"),
117
+ ("gpt_bigcode", "GPTBigCodeConfig"),
118
+ ("gpt_neo", "GPTNeoConfig"),
119
+ ("gpt_neox", "GPTNeoXConfig"),
120
+ ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"),
121
+ ("gptj", "GPTJConfig"),
122
+ ("gptsan-japanese", "GPTSanJapaneseConfig"),
123
+ ("graphormer", "GraphormerConfig"),
124
+ ("grounding-dino", "GroundingDinoConfig"),
125
+ ("groupvit", "GroupViTConfig"),
126
+ ("hubert", "HubertConfig"),
127
+ ("ibert", "IBertConfig"),
128
+ ("idefics", "IdeficsConfig"),
129
+ ("idefics2", "Idefics2Config"),
130
+ ("imagegpt", "ImageGPTConfig"),
131
+ ("informer", "InformerConfig"),
132
+ ("instructblip", "InstructBlipConfig"),
133
+ ("jamba", "JambaConfig"),
134
+ ("jukebox", "JukeboxConfig"),
135
+ ("kosmos-2", "Kosmos2Config"),
136
+ ("layoutlm", "LayoutLMConfig"),
137
+ ("layoutlmv2", "LayoutLMv2Config"),
138
+ ("layoutlmv3", "LayoutLMv3Config"),
139
+ ("led", "LEDConfig"),
140
+ ("levit", "LevitConfig"),
141
+ ("lilt", "LiltConfig"),
142
+ ("llama", "LlamaConfig"),
143
+ ("llava", "LlavaConfig"),
144
+ ("llava_next", "LlavaNextConfig"),
145
+ ("longformer", "LongformerConfig"),
146
+ ("longt5", "LongT5Config"),
147
+ ("luke", "LukeConfig"),
148
+ ("lxmert", "LxmertConfig"),
149
+ ("m2m_100", "M2M100Config"),
150
+ ("mamba", "MambaConfig"),
151
+ ("marian", "MarianConfig"),
152
+ ("markuplm", "MarkupLMConfig"),
153
+ ("mask2former", "Mask2FormerConfig"),
154
+ ("maskformer", "MaskFormerConfig"),
155
+ ("maskformer-swin", "MaskFormerSwinConfig"),
156
+ ("mbart", "MBartConfig"),
157
+ ("mctct", "MCTCTConfig"),
158
+ ("mega", "MegaConfig"),
159
+ ("megatron-bert", "MegatronBertConfig"),
160
+ ("mgp-str", "MgpstrConfig"),
161
+ ("mistral", "MistralConfig"),
162
+ ("mixtral", "MixtralConfig"),
163
+ ("mobilebert", "MobileBertConfig"),
164
+ ("mobilenet_v1", "MobileNetV1Config"),
165
+ ("mobilenet_v2", "MobileNetV2Config"),
166
+ ("mobilevit", "MobileViTConfig"),
167
+ ("mobilevitv2", "MobileViTV2Config"),
168
+ ("mpnet", "MPNetConfig"),
169
+ ("mpt", "MptConfig"),
170
+ ("mra", "MraConfig"),
171
+ ("mt5", "MT5Config"),
172
+ ("musicgen", "MusicgenConfig"),
173
+ ("musicgen_melody", "MusicgenMelodyConfig"),
174
+ ("mvp", "MvpConfig"),
175
+ ("nat", "NatConfig"),
176
+ ("nezha", "NezhaConfig"),
177
+ ("nllb-moe", "NllbMoeConfig"),
178
+ ("nougat", "VisionEncoderDecoderConfig"),
179
+ ("nystromformer", "NystromformerConfig"),
180
+ ("olmo", "OlmoConfig"),
181
+ ("oneformer", "OneFormerConfig"),
182
+ ("open-llama", "OpenLlamaConfig"),
183
+ ("openai-gpt", "OpenAIGPTConfig"),
184
+ ("opt", "OPTConfig"),
185
+ ("owlv2", "Owlv2Config"),
186
+ ("owlvit", "OwlViTConfig"),
187
+ ("patchtsmixer", "PatchTSMixerConfig"),
188
+ ("patchtst", "PatchTSTConfig"),
189
+ ("pegasus", "PegasusConfig"),
190
+ ("pegasus_x", "PegasusXConfig"),
191
+ ("perceiver", "PerceiverConfig"),
192
+ ("persimmon", "PersimmonConfig"),
193
+ ("phi", "PhiConfig"),
194
+ ("pix2struct", "Pix2StructConfig"),
195
+ ("plbart", "PLBartConfig"),
196
+ ("poolformer", "PoolFormerConfig"),
197
+ ("pop2piano", "Pop2PianoConfig"),
198
+ ("prophetnet", "ProphetNetConfig"),
199
+ ("pvt", "PvtConfig"),
200
+ ("pvt_v2", "PvtV2Config"),
201
+ ("qdqbert", "QDQBertConfig"),
202
+ ("qwen2", "Qwen2Config"),
203
+ ("qwen2_moe", "Qwen2MoeConfig"),
204
+ ("rag", "RagConfig"),
205
+ ("realm", "RealmConfig"),
206
+ ("recurrent_gemma", "RecurrentGemmaConfig"),
207
+ ("reformer", "ReformerConfig"),
208
+ ("regnet", "RegNetConfig"),
209
+ ("rembert", "RemBertConfig"),
210
+ ("resnet", "ResNetConfig"),
211
+ ("retribert", "RetriBertConfig"),
212
+ ("roberta", "RobertaConfig"),
213
+ ("roberta-prelayernorm", "RobertaPreLayerNormConfig"),
214
+ ("roc_bert", "RoCBertConfig"),
215
+ ("roformer", "RoFormerConfig"),
216
+ ("rwkv", "RwkvConfig"),
217
+ ("sam", "SamConfig"),
218
+ ("seamless_m4t", "SeamlessM4TConfig"),
219
+ ("seamless_m4t_v2", "SeamlessM4Tv2Config"),
220
+ ("segformer", "SegformerConfig"),
221
+ ("seggpt", "SegGptConfig"),
222
+ ("sew", "SEWConfig"),
223
+ ("sew-d", "SEWDConfig"),
224
+ ("siglip", "SiglipConfig"),
225
+ ("siglip_vision_model", "SiglipVisionConfig"),
226
+ ("speech-encoder-decoder", "SpeechEncoderDecoderConfig"),
227
+ ("speech_to_text", "Speech2TextConfig"),
228
+ ("speech_to_text_2", "Speech2Text2Config"),
229
+ ("speecht5", "SpeechT5Config"),
230
+ ("splinter", "SplinterConfig"),
231
+ ("squeezebert", "SqueezeBertConfig"),
232
+ ("stablelm", "StableLmConfig"),
233
+ ("starcoder2", "Starcoder2Config"),
234
+ ("superpoint", "SuperPointConfig"),
235
+ ("swiftformer", "SwiftFormerConfig"),
236
+ ("swin", "SwinConfig"),
237
+ ("swin2sr", "Swin2SRConfig"),
238
+ ("swinv2", "Swinv2Config"),
239
+ ("switch_transformers", "SwitchTransformersConfig"),
240
+ ("t5", "T5Config"),
241
+ ("table-transformer", "TableTransformerConfig"),
242
+ ("tapas", "TapasConfig"),
243
+ ("time_series_transformer", "TimeSeriesTransformerConfig"),
244
+ ("timesformer", "TimesformerConfig"),
245
+ ("timm_backbone", "TimmBackboneConfig"),
246
+ ("trajectory_transformer", "TrajectoryTransformerConfig"),
247
+ ("transfo-xl", "TransfoXLConfig"),
248
+ ("trocr", "TrOCRConfig"),
249
+ ("tvlt", "TvltConfig"),
250
+ ("tvp", "TvpConfig"),
251
+ ("udop", "UdopConfig"),
252
+ ("umt5", "UMT5Config"),
253
+ ("unispeech", "UniSpeechConfig"),
254
+ ("unispeech-sat", "UniSpeechSatConfig"),
255
+ ("univnet", "UnivNetConfig"),
256
+ ("upernet", "UperNetConfig"),
257
+ ("van", "VanConfig"),
258
+ ("videomae", "VideoMAEConfig"),
259
+ ("vilt", "ViltConfig"),
260
+ ("vipllava", "VipLlavaConfig"),
261
+ ("vision-encoder-decoder", "VisionEncoderDecoderConfig"),
262
+ ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"),
263
+ ("visual_bert", "VisualBertConfig"),
264
+ ("vit", "ViTConfig"),
265
+ ("vit_hybrid", "ViTHybridConfig"),
266
+ ("vit_mae", "ViTMAEConfig"),
267
+ ("vit_msn", "ViTMSNConfig"),
268
+ ("vitdet", "VitDetConfig"),
269
+ ("vitmatte", "VitMatteConfig"),
270
+ ("vits", "VitsConfig"),
271
+ ("vivit", "VivitConfig"),
272
+ ("wav2vec2", "Wav2Vec2Config"),
273
+ ("wav2vec2-bert", "Wav2Vec2BertConfig"),
274
+ ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"),
275
+ ("wavlm", "WavLMConfig"),
276
+ ("whisper", "WhisperConfig"),
277
+ ("xclip", "XCLIPConfig"),
278
+ ("xglm", "XGLMConfig"),
279
+ ("xlm", "XLMConfig"),
280
+ ("xlm-prophetnet", "XLMProphetNetConfig"),
281
+ ("xlm-roberta", "XLMRobertaConfig"),
282
+ ("xlm-roberta-xl", "XLMRobertaXLConfig"),
283
+ ("xlnet", "XLNetConfig"),
284
+ ("xmod", "XmodConfig"),
285
+ ("yolos", "YolosConfig"),
286
+ ("yoso", "YosoConfig"),
287
+ ]
288
+ )
289
+
290
+
291
+ MODEL_NAMES_MAPPING = OrderedDict(
292
+ [
293
+ # Add full (and cased) model names here
294
+ ("albert", "ALBERT"),
295
+ ("align", "ALIGN"),
296
+ ("altclip", "AltCLIP"),
297
+ ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"),
298
+ ("autoformer", "Autoformer"),
299
+ ("bark", "Bark"),
300
+ ("bart", "BART"),
301
+ ("barthez", "BARThez"),
302
+ ("bartpho", "BARTpho"),
303
+ ("beit", "BEiT"),
304
+ ("bert", "BERT"),
305
+ ("bert-generation", "Bert Generation"),
306
+ ("bert-japanese", "BertJapanese"),
307
+ ("bertweet", "BERTweet"),
308
+ ("big_bird", "BigBird"),
309
+ ("bigbird_pegasus", "BigBird-Pegasus"),
310
+ ("biogpt", "BioGpt"),
311
+ ("bit", "BiT"),
312
+ ("blenderbot", "Blenderbot"),
313
+ ("blenderbot-small", "BlenderbotSmall"),
314
+ ("blip", "BLIP"),
315
+ ("blip-2", "BLIP-2"),
316
+ ("bloom", "BLOOM"),
317
+ ("bort", "BORT"),
318
+ ("bridgetower", "BridgeTower"),
319
+ ("bros", "BROS"),
320
+ ("byt5", "ByT5"),
321
+ ("camembert", "CamemBERT"),
322
+ ("canine", "CANINE"),
323
+ ("chinese_clip", "Chinese-CLIP"),
324
+ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
325
+ ("clap", "CLAP"),
326
+ ("clip", "CLIP"),
327
+ ("clip_vision_model", "CLIPVisionModel"),
328
+ ("clipseg", "CLIPSeg"),
329
+ ("clvp", "CLVP"),
330
+ ("code_llama", "CodeLlama"),
331
+ ("codegen", "CodeGen"),
332
+ ("cohere", "Cohere"),
333
+ ("conditional_detr", "Conditional DETR"),
334
+ ("convbert", "ConvBERT"),
335
+ ("convnext", "ConvNeXT"),
336
+ ("convnextv2", "ConvNeXTV2"),
337
+ ("cpm", "CPM"),
338
+ ("cpmant", "CPM-Ant"),
339
+ ("ctrl", "CTRL"),
340
+ ("cvt", "CvT"),
341
+ ("data2vec-audio", "Data2VecAudio"),
342
+ ("data2vec-text", "Data2VecText"),
343
+ ("data2vec-vision", "Data2VecVision"),
344
+ ("dbrx", "DBRX"),
345
+ ("deberta", "DeBERTa"),
346
+ ("deberta-v2", "DeBERTa-v2"),
347
+ ("decision_transformer", "Decision Transformer"),
348
+ ("deformable_detr", "Deformable DETR"),
349
+ ("deit", "DeiT"),
350
+ ("deplot", "DePlot"),
351
+ ("depth_anything", "Depth Anything"),
352
+ ("deta", "DETA"),
353
+ ("detr", "DETR"),
354
+ ("dialogpt", "DialoGPT"),
355
+ ("dinat", "DiNAT"),
356
+ ("dinov2", "DINOv2"),
357
+ ("distilbert", "DistilBERT"),
358
+ ("dit", "DiT"),
359
+ ("donut-swin", "DonutSwin"),
360
+ ("dpr", "DPR"),
361
+ ("dpt", "DPT"),
362
+ ("efficientformer", "EfficientFormer"),
363
+ ("efficientnet", "EfficientNet"),
364
+ ("electra", "ELECTRA"),
365
+ ("encodec", "EnCodec"),
366
+ ("encoder-decoder", "Encoder decoder"),
367
+ ("ernie", "ERNIE"),
368
+ ("ernie_m", "ErnieM"),
369
+ ("esm", "ESM"),
370
+ ("falcon", "Falcon"),
371
+ ("fastspeech2_conformer", "FastSpeech2Conformer"),
372
+ ("flan-t5", "FLAN-T5"),
373
+ ("flan-ul2", "FLAN-UL2"),
374
+ ("flaubert", "FlauBERT"),
375
+ ("flava", "FLAVA"),
376
+ ("fnet", "FNet"),
377
+ ("focalnet", "FocalNet"),
378
+ ("fsmt", "FairSeq Machine-Translation"),
379
+ ("funnel", "Funnel Transformer"),
380
+ ("fuyu", "Fuyu"),
381
+ ("gemma", "Gemma"),
382
+ ("git", "GIT"),
383
+ ("glpn", "GLPN"),
384
+ ("gpt-sw3", "GPT-Sw3"),
385
+ ("gpt2", "OpenAI GPT-2"),
386
+ ("gpt_bigcode", "GPTBigCode"),
387
+ ("gpt_neo", "GPT Neo"),
388
+ ("gpt_neox", "GPT NeoX"),
389
+ ("gpt_neox_japanese", "GPT NeoX Japanese"),
390
+ ("gptj", "GPT-J"),
391
+ ("gptsan-japanese", "GPTSAN-japanese"),
392
+ ("graphormer", "Graphormer"),
393
+ ("grounding-dino", "Grounding DINO"),
394
+ ("groupvit", "GroupViT"),
395
+ ("herbert", "HerBERT"),
396
+ ("hubert", "Hubert"),
397
+ ("ibert", "I-BERT"),
398
+ ("idefics", "IDEFICS"),
399
+ ("idefics2", "Idefics2"),
400
+ ("imagegpt", "ImageGPT"),
401
+ ("informer", "Informer"),
402
+ ("instructblip", "InstructBLIP"),
403
+ ("jamba", "Jamba"),
404
+ ("jukebox", "Jukebox"),
405
+ ("kosmos-2", "KOSMOS-2"),
406
+ ("layoutlm", "LayoutLM"),
407
+ ("layoutlmv2", "LayoutLMv2"),
408
+ ("layoutlmv3", "LayoutLMv3"),
409
+ ("layoutxlm", "LayoutXLM"),
410
+ ("led", "LED"),
411
+ ("levit", "LeViT"),
412
+ ("lilt", "LiLT"),
413
+ ("llama", "LLaMA"),
414
+ ("llama2", "Llama2"),
415
+ ("llava", "LLaVa"),
416
+ ("llava_next", "LLaVA-NeXT"),
417
+ ("longformer", "Longformer"),
418
+ ("longt5", "LongT5"),
419
+ ("luke", "LUKE"),
420
+ ("lxmert", "LXMERT"),
421
+ ("m2m_100", "M2M100"),
422
+ ("madlad-400", "MADLAD-400"),
423
+ ("mamba", "Mamba"),
424
+ ("marian", "Marian"),
425
+ ("markuplm", "MarkupLM"),
426
+ ("mask2former", "Mask2Former"),
427
+ ("maskformer", "MaskFormer"),
428
+ ("maskformer-swin", "MaskFormerSwin"),
429
+ ("matcha", "MatCha"),
430
+ ("mbart", "mBART"),
431
+ ("mbart50", "mBART-50"),
432
+ ("mctct", "M-CTC-T"),
433
+ ("mega", "MEGA"),
434
+ ("megatron-bert", "Megatron-BERT"),
435
+ ("megatron_gpt2", "Megatron-GPT2"),
436
+ ("mgp-str", "MGP-STR"),
437
+ ("mistral", "Mistral"),
438
+ ("mixtral", "Mixtral"),
439
+ ("mluke", "mLUKE"),
440
+ ("mms", "MMS"),
441
+ ("mobilebert", "MobileBERT"),
442
+ ("mobilenet_v1", "MobileNetV1"),
443
+ ("mobilenet_v2", "MobileNetV2"),
444
+ ("mobilevit", "MobileViT"),
445
+ ("mobilevitv2", "MobileViTV2"),
446
+ ("mpnet", "MPNet"),
447
+ ("mpt", "MPT"),
448
+ ("mra", "MRA"),
449
+ ("mt5", "MT5"),
450
+ ("musicgen", "MusicGen"),
451
+ ("musicgen_melody", "MusicGen Melody"),
452
+ ("mvp", "MVP"),
453
+ ("nat", "NAT"),
454
+ ("nezha", "Nezha"),
455
+ ("nllb", "NLLB"),
456
+ ("nllb-moe", "NLLB-MOE"),
457
+ ("nougat", "Nougat"),
458
+ ("nystromformer", "Nyströmformer"),
459
+ ("olmo", "OLMo"),
460
+ ("oneformer", "OneFormer"),
461
+ ("open-llama", "OpenLlama"),
462
+ ("openai-gpt", "OpenAI GPT"),
463
+ ("opt", "OPT"),
464
+ ("owlv2", "OWLv2"),
465
+ ("owlvit", "OWL-ViT"),
466
+ ("patchtsmixer", "PatchTSMixer"),
467
+ ("patchtst", "PatchTST"),
468
+ ("pegasus", "Pegasus"),
469
+ ("pegasus_x", "PEGASUS-X"),
470
+ ("perceiver", "Perceiver"),
471
+ ("persimmon", "Persimmon"),
472
+ ("phi", "Phi"),
473
+ ("phobert", "PhoBERT"),
474
+ ("pix2struct", "Pix2Struct"),
475
+ ("plbart", "PLBart"),
476
+ ("poolformer", "PoolFormer"),
477
+ ("pop2piano", "Pop2Piano"),
478
+ ("prophetnet", "ProphetNet"),
479
+ ("pvt", "PVT"),
480
+ ("pvt_v2", "PVTv2"),
481
+ ("qdqbert", "QDQBert"),
482
+ ("qwen2", "Qwen2"),
483
+ ("qwen2_moe", "Qwen2MoE"),
484
+ ("rag", "RAG"),
485
+ ("realm", "REALM"),
486
+ ("recurrent_gemma", "RecurrentGemma"),
487
+ ("reformer", "Reformer"),
488
+ ("regnet", "RegNet"),
489
+ ("rembert", "RemBERT"),
490
+ ("resnet", "ResNet"),
491
+ ("retribert", "RetriBERT"),
492
+ ("roberta", "RoBERTa"),
493
+ ("roberta-prelayernorm", "RoBERTa-PreLayerNorm"),
494
+ ("roc_bert", "RoCBert"),
495
+ ("roformer", "RoFormer"),
496
+ ("rwkv", "RWKV"),
497
+ ("sam", "SAM"),
498
+ ("seamless_m4t", "SeamlessM4T"),
499
+ ("seamless_m4t_v2", "SeamlessM4Tv2"),
500
+ ("segformer", "SegFormer"),
501
+ ("seggpt", "SegGPT"),
502
+ ("sew", "SEW"),
503
+ ("sew-d", "SEW-D"),
504
+ ("siglip", "SigLIP"),
505
+ ("siglip_vision_model", "SiglipVisionModel"),
506
+ ("speech-encoder-decoder", "Speech Encoder decoder"),
507
+ ("speech_to_text", "Speech2Text"),
508
+ ("speech_to_text_2", "Speech2Text2"),
509
+ ("speecht5", "SpeechT5"),
510
+ ("splinter", "Splinter"),
511
+ ("squeezebert", "SqueezeBERT"),
512
+ ("stablelm", "StableLm"),
513
+ ("starcoder2", "Starcoder2"),
514
+ ("superpoint", "SuperPoint"),
515
+ ("swiftformer", "SwiftFormer"),
516
+ ("swin", "Swin Transformer"),
517
+ ("swin2sr", "Swin2SR"),
518
+ ("swinv2", "Swin Transformer V2"),
519
+ ("switch_transformers", "SwitchTransformers"),
520
+ ("t5", "T5"),
521
+ ("t5v1.1", "T5v1.1"),
522
+ ("table-transformer", "Table Transformer"),
523
+ ("tapas", "TAPAS"),
524
+ ("tapex", "TAPEX"),
525
+ ("time_series_transformer", "Time Series Transformer"),
526
+ ("timesformer", "TimeSformer"),
527
+ ("timm_backbone", "TimmBackbone"),
528
+ ("trajectory_transformer", "Trajectory Transformer"),
529
+ ("transfo-xl", "Transformer-XL"),
530
+ ("trocr", "TrOCR"),
531
+ ("tvlt", "TVLT"),
532
+ ("tvp", "TVP"),
533
+ ("udop", "UDOP"),
534
+ ("ul2", "UL2"),
535
+ ("umt5", "UMT5"),
536
+ ("unispeech", "UniSpeech"),
537
+ ("unispeech-sat", "UniSpeechSat"),
538
+ ("univnet", "UnivNet"),
539
+ ("upernet", "UPerNet"),
540
+ ("van", "VAN"),
541
+ ("videomae", "VideoMAE"),
542
+ ("vilt", "ViLT"),
543
+ ("vipllava", "VipLlava"),
544
+ ("vision-encoder-decoder", "Vision Encoder decoder"),
545
+ ("vision-text-dual-encoder", "VisionTextDualEncoder"),
546
+ ("visual_bert", "VisualBERT"),
547
+ ("vit", "ViT"),
548
+ ("vit_hybrid", "ViT Hybrid"),
549
+ ("vit_mae", "ViTMAE"),
550
+ ("vit_msn", "ViTMSN"),
551
+ ("vitdet", "VitDet"),
552
+ ("vitmatte", "ViTMatte"),
553
+ ("vits", "VITS"),
554
+ ("vivit", "ViViT"),
555
+ ("wav2vec2", "Wav2Vec2"),
556
+ ("wav2vec2-bert", "Wav2Vec2-BERT"),
557
+ ("wav2vec2-conformer", "Wav2Vec2-Conformer"),
558
+ ("wav2vec2_phoneme", "Wav2Vec2Phoneme"),
559
+ ("wavlm", "WavLM"),
560
+ ("whisper", "Whisper"),
561
+ ("xclip", "X-CLIP"),
562
+ ("xglm", "XGLM"),
563
+ ("xlm", "XLM"),
564
+ ("xlm-prophetnet", "XLM-ProphetNet"),
565
+ ("xlm-roberta", "XLM-RoBERTa"),
566
+ ("xlm-roberta-xl", "XLM-RoBERTa-XL"),
567
+ ("xlm-v", "XLM-V"),
568
+ ("xlnet", "XLNet"),
569
+ ("xls_r", "XLS-R"),
570
+ ("xlsr_wav2vec2", "XLSR-Wav2Vec2"),
571
+ ("xmod", "X-MOD"),
572
+ ("yolos", "YOLOS"),
573
+ ("yoso", "YOSO"),
574
+ ]
575
+ )
576
+
577
+ # This is tied to the processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting
578
+ # `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`.
579
+ DEPRECATED_MODELS = [
580
+ "bort",
581
+ "mctct",
582
+ "mmbt",
583
+ "open_llama",
584
+ "retribert",
585
+ "tapex",
586
+ "trajectory_transformer",
587
+ "transfo_xl",
588
+ "van",
589
+ ]
590
+
591
+ SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict(
592
+ [
593
+ ("openai-gpt", "openai"),
594
+ ("data2vec-audio", "data2vec"),
595
+ ("data2vec-text", "data2vec"),
596
+ ("data2vec-vision", "data2vec"),
597
+ ("donut-swin", "donut"),
598
+ ("kosmos-2", "kosmos2"),
599
+ ("maskformer-swin", "maskformer"),
600
+ ("xclip", "x_clip"),
601
+ ("clip_vision_model", "clip"),
602
+ ("siglip_vision_model", "siglip"),
603
+ ("chinese_clip_vision_model", "chinese_clip"),
604
+ ]
605
+ )
606
+
607
+
608
+ def model_type_to_module_name(key):
609
+ """Converts a config key to the corresponding module."""
610
+ # Special treatment
611
+ if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
612
+ return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]
613
+
614
+ key = key.replace("-", "_")
615
+ if key in DEPRECATED_MODELS:
616
+ key = f"deprecated.{key}"
617
+
618
+ return key
619
+
620
+
621
+ def config_class_to_model_type(config):
622
+ """Converts a config class name to the corresponding model type"""
623
+ for key, cls in CONFIG_MAPPING_NAMES.items():
624
+ if cls == config:
625
+ return key
626
+ # if key not found check in extra content
627
+ for key, cls in CONFIG_MAPPING._extra_content.items():
628
+ if cls.__name__ == config:
629
+ return key
630
+ return None
631
+
632
+
633
+ class _LazyConfigMapping(OrderedDict):
634
+ """
635
+ A dictionary that lazily loads its values when they are requested.
636
+ """
637
+
638
+ def __init__(self, mapping):
639
+ self._mapping = mapping
640
+ self._extra_content = {}
641
+ self._modules = {}
642
+
643
+ def __getitem__(self, key):
644
+ if key in self._extra_content:
645
+ return self._extra_content[key]
646
+ if key not in self._mapping:
647
+ raise KeyError(key)
648
+ value = self._mapping[key]
649
+ module_name = model_type_to_module_name(key)
650
+ if module_name not in self._modules:
651
+ self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
652
+ if hasattr(self._modules[module_name], value):
653
+ return getattr(self._modules[module_name], value)
654
+
655
+ # Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the
656
+ # object at the top level.
657
+ transformers_module = importlib.import_module("transformers")
658
+ return getattr(transformers_module, value)
659
+
660
+ def keys(self):
661
+ return list(self._mapping.keys()) + list(self._extra_content.keys())
662
+
663
+ def values(self):
664
+ return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values())
665
+
666
+ def items(self):
667
+ return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items())
668
+
669
+ def __iter__(self):
670
+ return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
671
+
672
+ def __contains__(self, item):
673
+ return item in self._mapping or item in self._extra_content
674
+
675
+ def register(self, key, value, exist_ok=False):
676
+ """
677
+ Register a new configuration in this mapping.
678
+ """
679
+ if key in self._mapping.keys() and not exist_ok:
680
+ raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
681
+ self._extra_content[key] = value
682
+
683
+
684
+ CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES)
685
+
686
+
687
+ class _LazyLoadAllMappings(OrderedDict):
688
+ """
689
+ A mapping that will load all pairs of key values at the first access (either by indexing, requesting keys, values,
690
+ etc.)
691
+
692
+ Args:
693
+ mapping: The mapping to load.
694
+ """
695
+
696
+ def __init__(self, mapping):
697
+ self._mapping = mapping
698
+ self._initialized = False
699
+ self._data = {}
700
+
701
+ def _initialize(self):
702
+ if self._initialized:
703
+ return
704
+
705
+ for model_type, map_name in self._mapping.items():
706
+ module_name = model_type_to_module_name(model_type)
707
+ module = importlib.import_module(f".{module_name}", "transformers.models")
708
+ mapping = getattr(module, map_name)
709
+ self._data.update(mapping)
710
+
711
+ self._initialized = True
712
+
713
+ def __getitem__(self, key):
714
+ self._initialize()
715
+ return self._data[key]
716
+
717
+ def keys(self):
718
+ self._initialize()
719
+ return self._data.keys()
720
+
721
+ def values(self):
722
+ self._initialize()
723
+ return self._data.values()
724
+
725
+ def items(self):
726
+ self._initialize()
727
+ return self._data.keys()
728
+
729
+ def __iter__(self):
730
+ self._initialize()
731
+ return iter(self._data)
732
+
733
+ def __contains__(self, item):
734
+ self._initialize()
735
+ return item in self._data
736
+
737
+
738
+ def _get_class_name(model_class: Union[str, List[str]]):
739
+ if isinstance(model_class, (list, tuple)):
740
+ return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
741
+ return f"[`{model_class}`]"
742
+
743
+
744
+ def _list_model_options(indent, config_to_class=None, use_model_types=True):
745
+ if config_to_class is None and not use_model_types:
746
+ raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.")
747
+ if use_model_types:
748
+ if config_to_class is None:
749
+ model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()}
750
+ else:
751
+ model_type_to_name = {
752
+ model_type: _get_class_name(model_class)
753
+ for model_type, model_class in config_to_class.items()
754
+ if model_type in MODEL_NAMES_MAPPING
755
+ }
756
+ lines = [
757
+ f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)"
758
+ for model_type in sorted(model_type_to_name.keys())
759
+ ]
760
+ else:
761
+ config_to_name = {
762
+ CONFIG_MAPPING_NAMES[config]: _get_class_name(clas)
763
+ for config, clas in config_to_class.items()
764
+ if config in CONFIG_MAPPING_NAMES
765
+ }
766
+ config_to_model_name = {
767
+ config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items()
768
+ }
769
+ lines = [
770
+ f"{indent}- [`{config_name}`] configuration class:"
771
+ f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)"
772
+ for config_name in sorted(config_to_name.keys())
773
+ ]
774
+ return "\n".join(lines)
775
+
776
+
777
+ def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True):
778
+ def docstring_decorator(fn):
779
+ docstrings = fn.__doc__
780
+ if docstrings is None:
781
+ # Example: -OO
782
+ return fn
783
+ lines = docstrings.split("\n")
784
+ i = 0
785
+ while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None:
786
+ i += 1
787
+ if i < len(lines):
788
+ indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0]
789
+ if use_model_types:
790
+ indent = f"{indent} "
791
+ lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types)
792
+ docstrings = "\n".join(lines)
793
+ else:
794
+ raise ValueError(
795
+ f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current"
796
+ f" docstring is:\n{docstrings}"
797
+ )
798
+ fn.__doc__ = docstrings
799
+ return fn
800
+
801
+ return docstring_decorator
802
+
803
+
804
+ class AutoConfig:
805
+ r"""
806
+ This is a generic configuration class that will be instantiated as one of the configuration classes of the library
807
+ when created with the [`~AutoConfig.from_pretrained`] class method.
808
+
809
+ This class cannot be instantiated directly using `__init__()` (throws an error).
810
+ """
811
+
812
+ def __init__(self):
813
+ raise EnvironmentError(
814
+ "AutoConfig is designed to be instantiated "
815
+ "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method."
816
+ )
817
+
818
+ @classmethod
819
+ def for_model(cls, model_type: str, *args, **kwargs):
820
+ if model_type in CONFIG_MAPPING:
821
+ config_class = CONFIG_MAPPING[model_type]
822
+ return config_class(*args, **kwargs)
823
+ raise ValueError(
824
+ f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}"
825
+ )
826
+
827
+ @classmethod
828
+ @replace_list_option_in_docstrings()
829
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
830
+ r"""
831
+ Instantiate one of the configuration classes of the library from a pretrained model configuration.
832
+
833
+ The configuration class to instantiate is selected based on the `model_type` property of the config object that
834
+ is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
835
+
836
+ List options
837
+
838
+ Args:
839
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
840
+ Can be either:
841
+
842
+ - A string, the *model id* of a pretrained model configuration hosted inside a model repo on
843
+ huggingface.co.
844
+ - A path to a *directory* containing a configuration file saved using the
845
+ [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
846
+ e.g., `./my_model_directory/`.
847
+ - A path or url to a saved configuration JSON *file*, e.g.,
848
+ `./my_model_directory/configuration.json`.
849
+ cache_dir (`str` or `os.PathLike`, *optional*):
850
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
851
+ standard cache should not be used.
852
+ force_download (`bool`, *optional*, defaults to `False`):
853
+ Whether or not to force the (re-)download of the model weights and configuration files and override the
854
+ cached versions if they exist.
855
+ resume_download (`bool`, *optional*, defaults to `False`):
856
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
857
+ file exists.
858
+ proxies (`Dict[str, str]`, *optional*):
859
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
860
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
861
+ revision (`str`, *optional*, defaults to `"main"`):
862
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
863
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
864
+ identifier allowed by git.
865
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
866
+ If `False`, then this function returns just the final configuration object.
867
+
868
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
869
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
870
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
871
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
872
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
873
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
874
+ execute code present on the Hub on your local machine.
875
+ kwargs (additional keyword arguments, *optional*):
876
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
877
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
878
+ by the `return_unused_kwargs` keyword parameter.
879
+
880
+ Examples:
881
+
882
+ ```python
883
+ >>> from transformers import AutoConfig
884
+
885
+ >>> # Download configuration from huggingface.co and cache.
886
+ >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
887
+
888
+ >>> # Download configuration from huggingface.co (user-uploaded) and cache.
889
+ >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
890
+
891
+ >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
892
+ >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
893
+
894
+ >>> # Load a specific configuration file.
895
+ >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
896
+
897
+ >>> # Change some config attributes when loading a pretrained config.
898
+ >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
899
+ >>> config.output_attentions
900
+ True
901
+
902
+ >>> config, unused_kwargs = AutoConfig.from_pretrained(
903
+ ... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
904
+ ... )
905
+ >>> config.output_attentions
906
+ True
907
+
908
+ >>> unused_kwargs
909
+ {'foo': False}
910
+ ```"""
911
+ use_auth_token = kwargs.pop("use_auth_token", None)
912
+ if use_auth_token is not None:
913
+ warnings.warn(
914
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
915
+ FutureWarning,
916
+ )
917
+ if kwargs.get("token", None) is not None:
918
+ raise ValueError(
919
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
920
+ )
921
+ kwargs["token"] = use_auth_token
922
+
923
+ kwargs["_from_auto"] = True
924
+ kwargs["name_or_path"] = pretrained_model_name_or_path
925
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
926
+ code_revision = kwargs.pop("code_revision", None)
927
+
928
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
929
+ has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]
930
+ has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING
931
+ trust_remote_code = resolve_trust_remote_code(
932
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
933
+ )
934
+
935
+ if has_remote_code and trust_remote_code:
936
+ class_ref = config_dict["auto_map"]["AutoConfig"]
937
+ config_class = get_class_from_dynamic_module(
938
+ class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
939
+ )
940
+ if os.path.isdir(pretrained_model_name_or_path):
941
+ config_class.register_for_auto_class()
942
+ return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
943
+ elif "model_type" in config_dict:
944
+ try:
945
+ config_class = CONFIG_MAPPING[config_dict["model_type"]]
946
+ except KeyError:
947
+ raise ValueError(
948
+ f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` "
949
+ "but Transformers does not recognize this architecture. This could be because of an "
950
+ "issue with the checkpoint, or because your version of Transformers is out of date."
951
+ )
952
+ return config_class.from_dict(config_dict, **unused_kwargs)
953
+ else:
954
+ # Fallback: use pattern matching on the string.
955
+ # We go from longer names to shorter names to catch roberta before bert (for instance)
956
+ for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True):
957
+ if pattern in str(pretrained_model_name_or_path):
958
+ return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs)
959
+
960
+ raise ValueError(
961
+ f"Unrecognized model in {pretrained_model_name_or_path}. "
962
+ f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings "
963
+ f"in its name: {', '.join(CONFIG_MAPPING.keys())}"
964
+ )
965
+
966
+ @staticmethod
967
+ def register(model_type, config, exist_ok=False):
968
+ """
969
+ Register a new configuration for this class.
970
+
971
+ Args:
972
+ model_type (`str`): The model type like "bert" or "gpt".
973
+ config ([`PretrainedConfig`]): The config to register.
974
+ """
975
+ if issubclass(config, PretrainedConfig) and config.model_type != model_type:
976
+ raise ValueError(
977
+ "The config you are passing has a `model_type` attribute that is not consistent with the model type "
978
+ f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they "
979
+ "match!"
980
+ )
981
+ CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
982
+
983
+
984
+ ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES)
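As a quick illustration of the registration path in `configuration_auto.py` above (the `AutoConfig.register` static method plus the `_LazyConfigMapping._extra_content` lookup), here is a hedged usage sketch; `MyNewModelConfig` and the `"my-new-model"` model type are hypothetical names, not part of the library.

```python
# Usage sketch for AutoConfig.register shown above. The config class and the
# "my-new-model" key are hypothetical; only the register/for_model API is real.
from transformers import AutoConfig, PretrainedConfig


class MyNewModelConfig(PretrainedConfig):
    model_type = "my-new-model"  # must match the key given to register()

    def __init__(self, hidden_size=64, **kwargs):
        self.hidden_size = hidden_size
        super().__init__(**kwargs)


AutoConfig.register("my-new-model", MyNewModelConfig)

# The key now resolves through CONFIG_MAPPING's _extra_content, so both
# AutoConfig.for_model and (for checkpoints declaring this model_type)
# AutoConfig.from_pretrained can find it.
config = AutoConfig.for_model("my-new-model", hidden_size=128)
print(type(config).__name__, config.hidden_size)  # MyNewModelConfig 128
```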
venv/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py ADDED
@@ -0,0 +1,396 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AutoFeatureExtractor class."""
16
+ import importlib
17
+ import json
18
+ import os
19
+ import warnings
20
+ from collections import OrderedDict
21
+ from typing import Dict, Optional, Union
22
+
23
+ # Build the list of all feature extractors
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...feature_extraction_utils import FeatureExtractionMixin
27
+ from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
28
+ from .auto_factory import _LazyAutoMapping
29
+ from .configuration_auto import (
30
+ CONFIG_MAPPING_NAMES,
31
+ AutoConfig,
32
+ model_type_to_module_name,
33
+ replace_list_option_in_docstrings,
34
+ )
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
40
+ [
41
+ ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
42
+ ("beit", "BeitFeatureExtractor"),
43
+ ("chinese_clip", "ChineseCLIPFeatureExtractor"),
44
+ ("clap", "ClapFeatureExtractor"),
45
+ ("clip", "CLIPFeatureExtractor"),
46
+ ("clipseg", "ViTFeatureExtractor"),
47
+ ("clvp", "ClvpFeatureExtractor"),
48
+ ("conditional_detr", "ConditionalDetrFeatureExtractor"),
49
+ ("convnext", "ConvNextFeatureExtractor"),
50
+ ("cvt", "ConvNextFeatureExtractor"),
51
+ ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
52
+ ("data2vec-vision", "BeitFeatureExtractor"),
53
+ ("deformable_detr", "DeformableDetrFeatureExtractor"),
54
+ ("deit", "DeiTFeatureExtractor"),
55
+ ("detr", "DetrFeatureExtractor"),
56
+ ("dinat", "ViTFeatureExtractor"),
57
+ ("donut-swin", "DonutFeatureExtractor"),
58
+ ("dpt", "DPTFeatureExtractor"),
59
+ ("encodec", "EncodecFeatureExtractor"),
60
+ ("flava", "FlavaFeatureExtractor"),
61
+ ("glpn", "GLPNFeatureExtractor"),
62
+ ("groupvit", "CLIPFeatureExtractor"),
63
+ ("hubert", "Wav2Vec2FeatureExtractor"),
64
+ ("imagegpt", "ImageGPTFeatureExtractor"),
65
+ ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
66
+ ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
67
+ ("levit", "LevitFeatureExtractor"),
68
+ ("maskformer", "MaskFormerFeatureExtractor"),
69
+ ("mctct", "MCTCTFeatureExtractor"),
70
+ ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
71
+ ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
72
+ ("mobilevit", "MobileViTFeatureExtractor"),
73
+ ("nat", "ViTFeatureExtractor"),
74
+ ("owlvit", "OwlViTFeatureExtractor"),
75
+ ("perceiver", "PerceiverFeatureExtractor"),
76
+ ("poolformer", "PoolFormerFeatureExtractor"),
77
+ ("pop2piano", "Pop2PianoFeatureExtractor"),
78
+ ("regnet", "ConvNextFeatureExtractor"),
79
+ ("resnet", "ConvNextFeatureExtractor"),
80
+ ("seamless_m4t", "SeamlessM4TFeatureExtractor"),
81
+ ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"),
82
+ ("segformer", "SegformerFeatureExtractor"),
83
+ ("sew", "Wav2Vec2FeatureExtractor"),
84
+ ("sew-d", "Wav2Vec2FeatureExtractor"),
85
+ ("speech_to_text", "Speech2TextFeatureExtractor"),
86
+ ("speecht5", "SpeechT5FeatureExtractor"),
87
+ ("swiftformer", "ViTFeatureExtractor"),
88
+ ("swin", "ViTFeatureExtractor"),
89
+ ("swinv2", "ViTFeatureExtractor"),
90
+ ("table-transformer", "DetrFeatureExtractor"),
91
+ ("timesformer", "VideoMAEFeatureExtractor"),
92
+ ("tvlt", "TvltFeatureExtractor"),
93
+ ("unispeech", "Wav2Vec2FeatureExtractor"),
94
+ ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
95
+ ("univnet", "UnivNetFeatureExtractor"),
96
+ ("van", "ConvNextFeatureExtractor"),
97
+ ("videomae", "VideoMAEFeatureExtractor"),
98
+ ("vilt", "ViltFeatureExtractor"),
99
+ ("vit", "ViTFeatureExtractor"),
100
+ ("vit_mae", "ViTFeatureExtractor"),
101
+ ("vit_msn", "ViTFeatureExtractor"),
102
+ ("wav2vec2", "Wav2Vec2FeatureExtractor"),
103
+ ("wav2vec2-bert", "Wav2Vec2FeatureExtractor"),
104
+ ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
105
+ ("wavlm", "Wav2Vec2FeatureExtractor"),
106
+ ("whisper", "WhisperFeatureExtractor"),
107
+ ("xclip", "CLIPFeatureExtractor"),
108
+ ("yolos", "YolosFeatureExtractor"),
109
+ ]
110
+ )
111
+
112
+ FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
113
+
114
+
115
+ def feature_extractor_class_from_name(class_name: str):
116
+ for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
117
+ if class_name in extractors:
118
+ module_name = model_type_to_module_name(module_name)
119
+
120
+ module = importlib.import_module(f".{module_name}", "transformers.models")
121
+ try:
122
+ return getattr(module, class_name)
123
+ except AttributeError:
124
+ continue
125
+
126
+ for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
127
+ if getattr(extractor, "__name__", None) == class_name:
128
+ return extractor
129
+
130
+ # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
131
+ # init and we return the proper dummy to get an appropriate error message.
132
+ main_module = importlib.import_module("transformers")
133
+ if hasattr(main_module, class_name):
134
+ return getattr(main_module, class_name)
135
+
136
+ return None
137
+
138
+
139
+ def get_feature_extractor_config(
140
+ pretrained_model_name_or_path: Union[str, os.PathLike],
141
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
142
+ force_download: bool = False,
143
+ resume_download: bool = False,
144
+ proxies: Optional[Dict[str, str]] = None,
145
+ token: Optional[Union[bool, str]] = None,
146
+ revision: Optional[str] = None,
147
+ local_files_only: bool = False,
148
+ **kwargs,
149
+ ):
150
+ """
151
+ Loads the feature extractor configuration from a pretrained model feature extractor configuration.
152
+
153
+ Args:
154
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
155
+ This can be either:
156
+
157
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
158
+ huggingface.co.
159
+ - a path to a *directory* containing a configuration file saved using the
160
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
161
+
162
+ cache_dir (`str` or `os.PathLike`, *optional*):
163
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
164
+ cache should not be used.
165
+ force_download (`bool`, *optional*, defaults to `False`):
166
+ Whether or not to force to (re-)download the configuration files and override the cached versions if they
167
+ exist.
168
+ resume_download (`bool`, *optional*, defaults to `False`):
169
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
170
+ proxies (`Dict[str, str]`, *optional*):
171
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
172
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
173
+ token (`str` or *bool*, *optional*):
174
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
175
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
176
+ revision (`str`, *optional*, defaults to `"main"`):
177
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
178
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
179
+ identifier allowed by git.
180
+ local_files_only (`bool`, *optional*, defaults to `False`):
181
+ If `True`, will only try to load the tokenizer configuration from local files.
182
+
183
+ <Tip>
184
+
185
+ Passing `token=True` is required when you want to use a private model.
186
+
187
+ </Tip>
188
+
189
+ Returns:
190
+ `Dict`: The configuration of the feature extractor.
191
+
192
+ Examples:
193
+
194
+ ```python
195
+ # Download configuration from huggingface.co and cache.
196
+ feature_extractor_config = get_feature_extractor_config("facebook/wav2vec2-base-960h")
197
+ # This model does not have a feature extractor config so the result will be an empty dict.
198
+ feature_extractor_config = get_feature_extractor_config("google-bert/bert-base-uncased")
199
+
200
+ # Save a pretrained feature extractor locally and you can reload its config
201
+ from transformers import AutoFeatureExtractor
202
+
203
+ feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
204
+ feature_extractor.save_pretrained("feature-extractor-test")
205
+ feature_extractor_config = get_feature_extractor_config("feature-extractor-test")
206
+ ```"""
207
+ use_auth_token = kwargs.pop("use_auth_token", None)
208
+ if use_auth_token is not None:
209
+ warnings.warn(
210
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
211
+ FutureWarning,
212
+ )
213
+ if token is not None:
214
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
215
+ token = use_auth_token
216
+
217
+ resolved_config_file = get_file_from_repo(
218
+ pretrained_model_name_or_path,
219
+ FEATURE_EXTRACTOR_NAME,
220
+ cache_dir=cache_dir,
221
+ force_download=force_download,
222
+ resume_download=resume_download,
223
+ proxies=proxies,
224
+ token=token,
225
+ revision=revision,
226
+ local_files_only=local_files_only,
227
+ )
228
+ if resolved_config_file is None:
229
+ logger.info(
230
+ "Could not locate the feature extractor configuration file, will try to use the model config instead."
231
+ )
232
+ return {}
233
+
234
+ with open(resolved_config_file, encoding="utf-8") as reader:
235
+ return json.load(reader)
236
+
237
+
238
+ class AutoFeatureExtractor:
239
+ r"""
240
+ This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the
241
+ library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.
242
+
243
+ This class cannot be instantiated directly using `__init__()` (throws an error).
244
+ """
245
+
246
+ def __init__(self):
247
+ raise EnvironmentError(
248
+ "AutoFeatureExtractor is designed to be instantiated "
249
+ "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
250
+ )
251
+
252
+ @classmethod
253
+ @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
254
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
255
+ r"""
256
+ Instantiate one of the feature extractor classes of the library from a pretrained model.
257
+
258
+ The feature extractor class to instantiate is selected based on the `model_type` property of the config object
259
+ (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
260
+ missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
261
+
262
+ List options
263
+
264
+ Params:
265
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
266
+ This can be either:
267
+
268
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
269
+ huggingface.co.
270
+ - a path to a *directory* containing a feature extractor file saved using the
271
+ [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
272
+ `./my_model_directory/`.
273
+ - a path or url to a saved feature extractor JSON *file*, e.g.,
274
+ `./my_model_directory/preprocessor_config.json`.
275
+ cache_dir (`str` or `os.PathLike`, *optional*):
276
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
277
+ standard cache should not be used.
278
+ force_download (`bool`, *optional*, defaults to `False`):
279
+ Whether or not to force to (re-)download the feature extractor files and override the cached versions
280
+ if they exist.
281
+ resume_download (`bool`, *optional*, defaults to `False`):
282
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file
283
+ exists.
284
+ proxies (`Dict[str, str]`, *optional*):
285
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
286
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
287
+ token (`str` or *bool*, *optional*):
288
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
289
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
290
+ revision (`str`, *optional*, defaults to `"main"`):
291
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
292
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
293
+ identifier allowed by git.
294
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
295
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
296
+ functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
297
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
298
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
299
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
300
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
301
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
302
+ execute code present on the Hub on your local machine.
303
+ kwargs (`Dict[str, Any]`, *optional*):
304
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
305
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
306
+ controlled by the `return_unused_kwargs` keyword parameter.
307
+
308
+ <Tip>
309
+
310
+ Passing `token=True` is required when you want to use a private model.
311
+
312
+ </Tip>
313
+
314
+ Examples:
315
+
316
+ ```python
317
+ >>> from transformers import AutoFeatureExtractor
318
+
319
+ >>> # Download feature extractor from huggingface.co and cache.
320
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
321
+
322
+ >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*)
323
+ >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/")
324
+ ```"""
325
+ use_auth_token = kwargs.pop("use_auth_token", None)
326
+ if use_auth_token is not None:
327
+ warnings.warn(
328
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
329
+ FutureWarning,
330
+ )
331
+ if kwargs.get("token", None) is not None:
332
+ raise ValueError(
333
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
334
+ )
335
+ kwargs["token"] = use_auth_token
336
+
337
+ config = kwargs.pop("config", None)
338
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
339
+ kwargs["_from_auto"] = True
340
+
341
+ config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
342
+ feature_extractor_class = config_dict.get("feature_extractor_type", None)
343
+ feature_extractor_auto_map = None
344
+ if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
345
+ feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
346
+
347
+ # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
348
+ if feature_extractor_class is None and feature_extractor_auto_map is None:
349
+ if not isinstance(config, PretrainedConfig):
350
+ config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
351
+ # It could be in `config.feature_extractor_type`
352
+ feature_extractor_class = getattr(config, "feature_extractor_type", None)
353
+ if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
354
+ feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
355
+
356
+ if feature_extractor_class is not None:
357
+ feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
358
+
359
+ has_remote_code = feature_extractor_auto_map is not None
360
+ has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
361
+ trust_remote_code = resolve_trust_remote_code(
362
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
363
+ )
364
+
365
+ if has_remote_code and trust_remote_code:
366
+ feature_extractor_class = get_class_from_dynamic_module(
367
+ feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
368
+ )
369
+ _ = kwargs.pop("code_revision", None)
370
+ if os.path.isdir(pretrained_model_name_or_path):
371
+ feature_extractor_class.register_for_auto_class()
372
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
373
+ elif feature_extractor_class is not None:
374
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
375
+ # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
376
+ elif type(config) in FEATURE_EXTRACTOR_MAPPING:
377
+ feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
378
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
379
+
380
+ raise ValueError(
381
+ f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
382
+ f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
383
+ f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
384
+ )
385
+
386
+ @staticmethod
387
+ def register(config_class, feature_extractor_class, exist_ok=False):
388
+ """
389
+ Register a new feature extractor for this class.
390
+
391
+ Args:
392
+ config_class ([`PretrainedConfig`]):
393
+ The configuration corresponding to the model to register.
394
+ feature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.
395
+ """
396
+ FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)
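
For orientation, the two entry points above (`from_pretrained` and `register`) can be exercised as in the short sketch below. The snippet is illustrative only and not part of the committed file: the checkpoint id is just an example, and `MyConfig` / `MyFeatureExtractor` are hypothetical placeholder classes.

from transformers import AutoConfig, AutoFeatureExtractor, FeatureExtractionMixin, PretrainedConfig

# Path 1: resolve the extractor class from a checkpoint's preprocessor/config files.
extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")  # example checkpoint id

# Path 2: register a custom config/extractor pair so the FEATURE_EXTRACTOR_MAPPING
# lookup in from_pretrained can resolve the "my-model" model type.
class MyConfig(PretrainedConfig):  # hypothetical config class
    model_type = "my-model"

class MyFeatureExtractor(FeatureExtractionMixin):  # hypothetical feature extractor
    pass

AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)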
venv/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py ADDED
@@ -0,0 +1,1705 @@
+ # coding=utf-8
+ # Copyright 2018 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Auto Model class."""
+
+ import warnings
+ from collections import OrderedDict
+
+ from ...utils import logging
+ from .auto_factory import (
+     _BaseAutoBackboneClass,
+     _BaseAutoModelClass,
+     _LazyAutoMapping,
+     auto_class_update,
+ )
+ from .configuration_auto import CONFIG_MAPPING_NAMES
+
+
+ logger = logging.get_logger(__name__)
+
+ MODEL_MAPPING_NAMES = OrderedDict(
33
+ [
34
+ # Base model mapping
35
+ ("albert", "AlbertModel"),
36
+ ("align", "AlignModel"),
37
+ ("altclip", "AltCLIPModel"),
38
+ ("audio-spectrogram-transformer", "ASTModel"),
39
+ ("autoformer", "AutoformerModel"),
40
+ ("bark", "BarkModel"),
41
+ ("bart", "BartModel"),
42
+ ("beit", "BeitModel"),
43
+ ("bert", "BertModel"),
44
+ ("bert-generation", "BertGenerationEncoder"),
45
+ ("big_bird", "BigBirdModel"),
46
+ ("bigbird_pegasus", "BigBirdPegasusModel"),
47
+ ("biogpt", "BioGptModel"),
48
+ ("bit", "BitModel"),
49
+ ("blenderbot", "BlenderbotModel"),
50
+ ("blenderbot-small", "BlenderbotSmallModel"),
51
+ ("blip", "BlipModel"),
52
+ ("blip-2", "Blip2Model"),
53
+ ("bloom", "BloomModel"),
54
+ ("bridgetower", "BridgeTowerModel"),
55
+ ("bros", "BrosModel"),
56
+ ("camembert", "CamembertModel"),
57
+ ("canine", "CanineModel"),
58
+ ("chinese_clip", "ChineseCLIPModel"),
59
+ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
60
+ ("clap", "ClapModel"),
61
+ ("clip", "CLIPModel"),
62
+ ("clip_vision_model", "CLIPVisionModel"),
63
+ ("clipseg", "CLIPSegModel"),
64
+ ("clvp", "ClvpModelForConditionalGeneration"),
65
+ ("code_llama", "LlamaModel"),
66
+ ("codegen", "CodeGenModel"),
67
+ ("cohere", "CohereModel"),
68
+ ("conditional_detr", "ConditionalDetrModel"),
69
+ ("convbert", "ConvBertModel"),
70
+ ("convnext", "ConvNextModel"),
71
+ ("convnextv2", "ConvNextV2Model"),
72
+ ("cpmant", "CpmAntModel"),
73
+ ("ctrl", "CTRLModel"),
74
+ ("cvt", "CvtModel"),
75
+ ("data2vec-audio", "Data2VecAudioModel"),
76
+ ("data2vec-text", "Data2VecTextModel"),
77
+ ("data2vec-vision", "Data2VecVisionModel"),
78
+ ("dbrx", "DbrxModel"),
79
+ ("deberta", "DebertaModel"),
80
+ ("deberta-v2", "DebertaV2Model"),
81
+ ("decision_transformer", "DecisionTransformerModel"),
82
+ ("deformable_detr", "DeformableDetrModel"),
83
+ ("deit", "DeiTModel"),
84
+ ("deta", "DetaModel"),
85
+ ("detr", "DetrModel"),
86
+ ("dinat", "DinatModel"),
87
+ ("dinov2", "Dinov2Model"),
88
+ ("distilbert", "DistilBertModel"),
89
+ ("donut-swin", "DonutSwinModel"),
90
+ ("dpr", "DPRQuestionEncoder"),
91
+ ("dpt", "DPTModel"),
92
+ ("efficientformer", "EfficientFormerModel"),
93
+ ("efficientnet", "EfficientNetModel"),
94
+ ("electra", "ElectraModel"),
95
+ ("encodec", "EncodecModel"),
96
+ ("ernie", "ErnieModel"),
97
+ ("ernie_m", "ErnieMModel"),
98
+ ("esm", "EsmModel"),
99
+ ("falcon", "FalconModel"),
100
+ ("fastspeech2_conformer", "FastSpeech2ConformerModel"),
101
+ ("flaubert", "FlaubertModel"),
102
+ ("flava", "FlavaModel"),
103
+ ("fnet", "FNetModel"),
104
+ ("focalnet", "FocalNetModel"),
105
+ ("fsmt", "FSMTModel"),
106
+ ("funnel", ("FunnelModel", "FunnelBaseModel")),
107
+ ("gemma", "GemmaModel"),
108
+ ("git", "GitModel"),
109
+ ("glpn", "GLPNModel"),
110
+ ("gpt-sw3", "GPT2Model"),
111
+ ("gpt2", "GPT2Model"),
112
+ ("gpt_bigcode", "GPTBigCodeModel"),
113
+ ("gpt_neo", "GPTNeoModel"),
114
+ ("gpt_neox", "GPTNeoXModel"),
115
+ ("gpt_neox_japanese", "GPTNeoXJapaneseModel"),
116
+ ("gptj", "GPTJModel"),
117
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
118
+ ("graphormer", "GraphormerModel"),
119
+ ("grounding-dino", "GroundingDinoModel"),
120
+ ("groupvit", "GroupViTModel"),
121
+ ("hubert", "HubertModel"),
122
+ ("ibert", "IBertModel"),
123
+ ("idefics", "IdeficsModel"),
124
+ ("idefics2", "Idefics2Model"),
125
+ ("imagegpt", "ImageGPTModel"),
126
+ ("informer", "InformerModel"),
127
+ ("jamba", "JambaModel"),
128
+ ("jukebox", "JukeboxModel"),
129
+ ("kosmos-2", "Kosmos2Model"),
130
+ ("layoutlm", "LayoutLMModel"),
131
+ ("layoutlmv2", "LayoutLMv2Model"),
132
+ ("layoutlmv3", "LayoutLMv3Model"),
133
+ ("led", "LEDModel"),
134
+ ("levit", "LevitModel"),
135
+ ("lilt", "LiltModel"),
136
+ ("llama", "LlamaModel"),
137
+ ("longformer", "LongformerModel"),
138
+ ("longt5", "LongT5Model"),
139
+ ("luke", "LukeModel"),
140
+ ("lxmert", "LxmertModel"),
141
+ ("m2m_100", "M2M100Model"),
142
+ ("mamba", "MambaModel"),
143
+ ("marian", "MarianModel"),
144
+ ("markuplm", "MarkupLMModel"),
145
+ ("mask2former", "Mask2FormerModel"),
146
+ ("maskformer", "MaskFormerModel"),
147
+ ("maskformer-swin", "MaskFormerSwinModel"),
148
+ ("mbart", "MBartModel"),
149
+ ("mctct", "MCTCTModel"),
150
+ ("mega", "MegaModel"),
151
+ ("megatron-bert", "MegatronBertModel"),
152
+ ("mgp-str", "MgpstrForSceneTextRecognition"),
153
+ ("mistral", "MistralModel"),
154
+ ("mixtral", "MixtralModel"),
155
+ ("mobilebert", "MobileBertModel"),
156
+ ("mobilenet_v1", "MobileNetV1Model"),
157
+ ("mobilenet_v2", "MobileNetV2Model"),
158
+ ("mobilevit", "MobileViTModel"),
159
+ ("mobilevitv2", "MobileViTV2Model"),
160
+ ("mpnet", "MPNetModel"),
161
+ ("mpt", "MptModel"),
162
+ ("mra", "MraModel"),
163
+ ("mt5", "MT5Model"),
164
+ ("mvp", "MvpModel"),
165
+ ("nat", "NatModel"),
166
+ ("nezha", "NezhaModel"),
167
+ ("nllb-moe", "NllbMoeModel"),
168
+ ("nystromformer", "NystromformerModel"),
169
+ ("olmo", "OlmoModel"),
170
+ ("oneformer", "OneFormerModel"),
171
+ ("open-llama", "OpenLlamaModel"),
172
+ ("openai-gpt", "OpenAIGPTModel"),
173
+ ("opt", "OPTModel"),
174
+ ("owlv2", "Owlv2Model"),
175
+ ("owlvit", "OwlViTModel"),
176
+ ("patchtsmixer", "PatchTSMixerModel"),
177
+ ("patchtst", "PatchTSTModel"),
178
+ ("pegasus", "PegasusModel"),
179
+ ("pegasus_x", "PegasusXModel"),
180
+ ("perceiver", "PerceiverModel"),
181
+ ("persimmon", "PersimmonModel"),
182
+ ("phi", "PhiModel"),
183
+ ("plbart", "PLBartModel"),
184
+ ("poolformer", "PoolFormerModel"),
185
+ ("prophetnet", "ProphetNetModel"),
186
+ ("pvt", "PvtModel"),
187
+ ("pvt_v2", "PvtV2Model"),
188
+ ("qdqbert", "QDQBertModel"),
189
+ ("qwen2", "Qwen2Model"),
190
+ ("qwen2_moe", "Qwen2MoeModel"),
191
+ ("recurrent_gemma", "RecurrentGemmaModel"),
192
+ ("reformer", "ReformerModel"),
193
+ ("regnet", "RegNetModel"),
194
+ ("rembert", "RemBertModel"),
195
+ ("resnet", "ResNetModel"),
196
+ ("retribert", "RetriBertModel"),
197
+ ("roberta", "RobertaModel"),
198
+ ("roberta-prelayernorm", "RobertaPreLayerNormModel"),
199
+ ("roc_bert", "RoCBertModel"),
200
+ ("roformer", "RoFormerModel"),
201
+ ("rwkv", "RwkvModel"),
202
+ ("sam", "SamModel"),
203
+ ("seamless_m4t", "SeamlessM4TModel"),
204
+ ("seamless_m4t_v2", "SeamlessM4Tv2Model"),
205
+ ("segformer", "SegformerModel"),
206
+ ("seggpt", "SegGptModel"),
207
+ ("sew", "SEWModel"),
208
+ ("sew-d", "SEWDModel"),
209
+ ("siglip", "SiglipModel"),
210
+ ("siglip_vision_model", "SiglipVisionModel"),
211
+ ("speech_to_text", "Speech2TextModel"),
212
+ ("speecht5", "SpeechT5Model"),
213
+ ("splinter", "SplinterModel"),
214
+ ("squeezebert", "SqueezeBertModel"),
215
+ ("stablelm", "StableLmModel"),
216
+ ("starcoder2", "Starcoder2Model"),
217
+ ("swiftformer", "SwiftFormerModel"),
218
+ ("swin", "SwinModel"),
219
+ ("swin2sr", "Swin2SRModel"),
220
+ ("swinv2", "Swinv2Model"),
221
+ ("switch_transformers", "SwitchTransformersModel"),
222
+ ("t5", "T5Model"),
223
+ ("table-transformer", "TableTransformerModel"),
224
+ ("tapas", "TapasModel"),
225
+ ("time_series_transformer", "TimeSeriesTransformerModel"),
226
+ ("timesformer", "TimesformerModel"),
227
+ ("timm_backbone", "TimmBackbone"),
228
+ ("trajectory_transformer", "TrajectoryTransformerModel"),
229
+ ("transfo-xl", "TransfoXLModel"),
230
+ ("tvlt", "TvltModel"),
231
+ ("tvp", "TvpModel"),
232
+ ("udop", "UdopModel"),
233
+ ("umt5", "UMT5Model"),
234
+ ("unispeech", "UniSpeechModel"),
235
+ ("unispeech-sat", "UniSpeechSatModel"),
236
+ ("univnet", "UnivNetModel"),
237
+ ("van", "VanModel"),
238
+ ("videomae", "VideoMAEModel"),
239
+ ("vilt", "ViltModel"),
240
+ ("vision-text-dual-encoder", "VisionTextDualEncoderModel"),
241
+ ("visual_bert", "VisualBertModel"),
242
+ ("vit", "ViTModel"),
243
+ ("vit_hybrid", "ViTHybridModel"),
244
+ ("vit_mae", "ViTMAEModel"),
245
+ ("vit_msn", "ViTMSNModel"),
246
+ ("vitdet", "VitDetModel"),
247
+ ("vits", "VitsModel"),
248
+ ("vivit", "VivitModel"),
249
+ ("wav2vec2", "Wav2Vec2Model"),
250
+ ("wav2vec2-bert", "Wav2Vec2BertModel"),
251
+ ("wav2vec2-conformer", "Wav2Vec2ConformerModel"),
252
+ ("wavlm", "WavLMModel"),
253
+ ("whisper", "WhisperModel"),
254
+ ("xclip", "XCLIPModel"),
255
+ ("xglm", "XGLMModel"),
256
+ ("xlm", "XLMModel"),
257
+ ("xlm-prophetnet", "XLMProphetNetModel"),
258
+ ("xlm-roberta", "XLMRobertaModel"),
259
+ ("xlm-roberta-xl", "XLMRobertaXLModel"),
260
+ ("xlnet", "XLNetModel"),
261
+ ("xmod", "XmodModel"),
262
+ ("yolos", "YolosModel"),
263
+ ("yoso", "YosoModel"),
264
+ ]
265
+ )
266
+
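
As a point of reference (not part of the committed file): `MODEL_MAPPING_NAMES` only stores model-type to class-name strings, and the `MODEL_MAPPING = _LazyAutoMapping(...)` wrapper defined near the end of this file imports the real class on first lookup. A minimal sketch of the usual consumer path, using public helpers from elsewhere in the library:

from transformers import AutoConfig, AutoModel

config = AutoConfig.for_model("bert")   # default BertConfig for the "bert" model type
model = AutoModel.from_config(config)   # type(config) is resolved through MODEL_MAPPING to BertModel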
267
+ MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
268
+ [
269
+ # Model for pre-training mapping
270
+ ("albert", "AlbertForPreTraining"),
271
+ ("bart", "BartForConditionalGeneration"),
272
+ ("bert", "BertForPreTraining"),
273
+ ("big_bird", "BigBirdForPreTraining"),
274
+ ("bloom", "BloomForCausalLM"),
275
+ ("camembert", "CamembertForMaskedLM"),
276
+ ("ctrl", "CTRLLMHeadModel"),
277
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
278
+ ("deberta", "DebertaForMaskedLM"),
279
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
280
+ ("distilbert", "DistilBertForMaskedLM"),
281
+ ("electra", "ElectraForPreTraining"),
282
+ ("ernie", "ErnieForPreTraining"),
283
+ ("flaubert", "FlaubertWithLMHeadModel"),
284
+ ("flava", "FlavaForPreTraining"),
285
+ ("fnet", "FNetForPreTraining"),
286
+ ("fsmt", "FSMTForConditionalGeneration"),
287
+ ("funnel", "FunnelForPreTraining"),
288
+ ("gpt-sw3", "GPT2LMHeadModel"),
289
+ ("gpt2", "GPT2LMHeadModel"),
290
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
291
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
292
+ ("ibert", "IBertForMaskedLM"),
293
+ ("idefics", "IdeficsForVisionText2Text"),
294
+ ("idefics2", "Idefics2ForConditionalGeneration"),
295
+ ("layoutlm", "LayoutLMForMaskedLM"),
296
+ ("llava", "LlavaForConditionalGeneration"),
297
+ ("llava_next", "LlavaNextForConditionalGeneration"),
298
+ ("longformer", "LongformerForMaskedLM"),
299
+ ("luke", "LukeForMaskedLM"),
300
+ ("lxmert", "LxmertForPreTraining"),
301
+ ("mamba", "MambaForCausalLM"),
302
+ ("mega", "MegaForMaskedLM"),
303
+ ("megatron-bert", "MegatronBertForPreTraining"),
304
+ ("mobilebert", "MobileBertForPreTraining"),
305
+ ("mpnet", "MPNetForMaskedLM"),
306
+ ("mpt", "MptForCausalLM"),
307
+ ("mra", "MraForMaskedLM"),
308
+ ("mvp", "MvpForConditionalGeneration"),
309
+ ("nezha", "NezhaForPreTraining"),
310
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
311
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
312
+ ("retribert", "RetriBertModel"),
313
+ ("roberta", "RobertaForMaskedLM"),
314
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
315
+ ("roc_bert", "RoCBertForPreTraining"),
316
+ ("rwkv", "RwkvForCausalLM"),
317
+ ("splinter", "SplinterForPreTraining"),
318
+ ("squeezebert", "SqueezeBertForMaskedLM"),
319
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
320
+ ("t5", "T5ForConditionalGeneration"),
321
+ ("tapas", "TapasForMaskedLM"),
322
+ ("transfo-xl", "TransfoXLLMHeadModel"),
323
+ ("tvlt", "TvltForPreTraining"),
324
+ ("unispeech", "UniSpeechForPreTraining"),
325
+ ("unispeech-sat", "UniSpeechSatForPreTraining"),
326
+ ("videomae", "VideoMAEForPreTraining"),
327
+ ("vipllava", "VipLlavaForConditionalGeneration"),
328
+ ("visual_bert", "VisualBertForPreTraining"),
329
+ ("vit_mae", "ViTMAEForPreTraining"),
330
+ ("wav2vec2", "Wav2Vec2ForPreTraining"),
331
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"),
332
+ ("xlm", "XLMWithLMHeadModel"),
333
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
334
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
335
+ ("xlnet", "XLNetLMHeadModel"),
336
+ ("xmod", "XmodForMaskedLM"),
337
+ ]
338
+ )
339
+
340
+ MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
341
+ [
342
+ # Model with LM heads mapping
343
+ ("albert", "AlbertForMaskedLM"),
344
+ ("bart", "BartForConditionalGeneration"),
345
+ ("bert", "BertForMaskedLM"),
346
+ ("big_bird", "BigBirdForMaskedLM"),
347
+ ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"),
348
+ ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"),
349
+ ("bloom", "BloomForCausalLM"),
350
+ ("camembert", "CamembertForMaskedLM"),
351
+ ("codegen", "CodeGenForCausalLM"),
352
+ ("convbert", "ConvBertForMaskedLM"),
353
+ ("cpmant", "CpmAntForCausalLM"),
354
+ ("ctrl", "CTRLLMHeadModel"),
355
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
356
+ ("deberta", "DebertaForMaskedLM"),
357
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
358
+ ("distilbert", "DistilBertForMaskedLM"),
359
+ ("electra", "ElectraForMaskedLM"),
360
+ ("encoder-decoder", "EncoderDecoderModel"),
361
+ ("ernie", "ErnieForMaskedLM"),
362
+ ("esm", "EsmForMaskedLM"),
363
+ ("flaubert", "FlaubertWithLMHeadModel"),
364
+ ("fnet", "FNetForMaskedLM"),
365
+ ("fsmt", "FSMTForConditionalGeneration"),
366
+ ("funnel", "FunnelForMaskedLM"),
367
+ ("git", "GitForCausalLM"),
368
+ ("gpt-sw3", "GPT2LMHeadModel"),
369
+ ("gpt2", "GPT2LMHeadModel"),
370
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
371
+ ("gpt_neo", "GPTNeoForCausalLM"),
372
+ ("gpt_neox", "GPTNeoXForCausalLM"),
373
+ ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"),
374
+ ("gptj", "GPTJForCausalLM"),
375
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
376
+ ("ibert", "IBertForMaskedLM"),
377
+ ("layoutlm", "LayoutLMForMaskedLM"),
378
+ ("led", "LEDForConditionalGeneration"),
379
+ ("longformer", "LongformerForMaskedLM"),
380
+ ("longt5", "LongT5ForConditionalGeneration"),
381
+ ("luke", "LukeForMaskedLM"),
382
+ ("m2m_100", "M2M100ForConditionalGeneration"),
383
+ ("mamba", "MambaForCausalLM"),
384
+ ("marian", "MarianMTModel"),
385
+ ("mega", "MegaForMaskedLM"),
386
+ ("megatron-bert", "MegatronBertForCausalLM"),
387
+ ("mobilebert", "MobileBertForMaskedLM"),
388
+ ("mpnet", "MPNetForMaskedLM"),
389
+ ("mpt", "MptForCausalLM"),
390
+ ("mra", "MraForMaskedLM"),
391
+ ("mvp", "MvpForConditionalGeneration"),
392
+ ("nezha", "NezhaForMaskedLM"),
393
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
394
+ ("nystromformer", "NystromformerForMaskedLM"),
395
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
396
+ ("pegasus_x", "PegasusXForConditionalGeneration"),
397
+ ("plbart", "PLBartForConditionalGeneration"),
398
+ ("pop2piano", "Pop2PianoForConditionalGeneration"),
399
+ ("qdqbert", "QDQBertForMaskedLM"),
400
+ ("reformer", "ReformerModelWithLMHead"),
401
+ ("rembert", "RemBertForMaskedLM"),
402
+ ("roberta", "RobertaForMaskedLM"),
403
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
404
+ ("roc_bert", "RoCBertForMaskedLM"),
405
+ ("roformer", "RoFormerForMaskedLM"),
406
+ ("rwkv", "RwkvForCausalLM"),
407
+ ("speech_to_text", "Speech2TextForConditionalGeneration"),
408
+ ("squeezebert", "SqueezeBertForMaskedLM"),
409
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
410
+ ("t5", "T5ForConditionalGeneration"),
411
+ ("tapas", "TapasForMaskedLM"),
412
+ ("transfo-xl", "TransfoXLLMHeadModel"),
413
+ ("wav2vec2", "Wav2Vec2ForMaskedLM"),
414
+ ("whisper", "WhisperForConditionalGeneration"),
415
+ ("xlm", "XLMWithLMHeadModel"),
416
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
417
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
418
+ ("xlnet", "XLNetLMHeadModel"),
419
+ ("xmod", "XmodForMaskedLM"),
420
+ ("yoso", "YosoForMaskedLM"),
421
+ ]
422
+ )
423
+
424
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
425
+ [
426
+ # Model for Causal LM mapping
427
+ ("bart", "BartForCausalLM"),
428
+ ("bert", "BertLMHeadModel"),
429
+ ("bert-generation", "BertGenerationDecoder"),
430
+ ("big_bird", "BigBirdForCausalLM"),
431
+ ("bigbird_pegasus", "BigBirdPegasusForCausalLM"),
432
+ ("biogpt", "BioGptForCausalLM"),
433
+ ("blenderbot", "BlenderbotForCausalLM"),
434
+ ("blenderbot-small", "BlenderbotSmallForCausalLM"),
435
+ ("bloom", "BloomForCausalLM"),
436
+ ("camembert", "CamembertForCausalLM"),
437
+ ("code_llama", "LlamaForCausalLM"),
438
+ ("codegen", "CodeGenForCausalLM"),
439
+ ("cohere", "CohereForCausalLM"),
440
+ ("cpmant", "CpmAntForCausalLM"),
441
+ ("ctrl", "CTRLLMHeadModel"),
442
+ ("data2vec-text", "Data2VecTextForCausalLM"),
443
+ ("dbrx", "DbrxForCausalLM"),
444
+ ("electra", "ElectraForCausalLM"),
445
+ ("ernie", "ErnieForCausalLM"),
446
+ ("falcon", "FalconForCausalLM"),
447
+ ("fuyu", "FuyuForCausalLM"),
448
+ ("gemma", "GemmaForCausalLM"),
449
+ ("git", "GitForCausalLM"),
450
+ ("gpt-sw3", "GPT2LMHeadModel"),
451
+ ("gpt2", "GPT2LMHeadModel"),
452
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
453
+ ("gpt_neo", "GPTNeoForCausalLM"),
454
+ ("gpt_neox", "GPTNeoXForCausalLM"),
455
+ ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"),
456
+ ("gptj", "GPTJForCausalLM"),
457
+ ("jamba", "JambaForCausalLM"),
458
+ ("llama", "LlamaForCausalLM"),
459
+ ("mamba", "MambaForCausalLM"),
460
+ ("marian", "MarianForCausalLM"),
461
+ ("mbart", "MBartForCausalLM"),
462
+ ("mega", "MegaForCausalLM"),
463
+ ("megatron-bert", "MegatronBertForCausalLM"),
464
+ ("mistral", "MistralForCausalLM"),
465
+ ("mixtral", "MixtralForCausalLM"),
466
+ ("mpt", "MptForCausalLM"),
467
+ ("musicgen", "MusicgenForCausalLM"),
468
+ ("musicgen_melody", "MusicgenMelodyForCausalLM"),
469
+ ("mvp", "MvpForCausalLM"),
470
+ ("olmo", "OlmoForCausalLM"),
471
+ ("open-llama", "OpenLlamaForCausalLM"),
472
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
473
+ ("opt", "OPTForCausalLM"),
474
+ ("pegasus", "PegasusForCausalLM"),
475
+ ("persimmon", "PersimmonForCausalLM"),
476
+ ("phi", "PhiForCausalLM"),
477
+ ("plbart", "PLBartForCausalLM"),
478
+ ("prophetnet", "ProphetNetForCausalLM"),
479
+ ("qdqbert", "QDQBertLMHeadModel"),
480
+ ("qwen2", "Qwen2ForCausalLM"),
481
+ ("qwen2_moe", "Qwen2MoeForCausalLM"),
482
+ ("recurrent_gemma", "RecurrentGemmaForCausalLM"),
483
+ ("reformer", "ReformerModelWithLMHead"),
484
+ ("rembert", "RemBertForCausalLM"),
485
+ ("roberta", "RobertaForCausalLM"),
486
+ ("roberta-prelayernorm", "RobertaPreLayerNormForCausalLM"),
487
+ ("roc_bert", "RoCBertForCausalLM"),
488
+ ("roformer", "RoFormerForCausalLM"),
489
+ ("rwkv", "RwkvForCausalLM"),
490
+ ("speech_to_text_2", "Speech2Text2ForCausalLM"),
491
+ ("stablelm", "StableLmForCausalLM"),
492
+ ("starcoder2", "Starcoder2ForCausalLM"),
493
+ ("transfo-xl", "TransfoXLLMHeadModel"),
494
+ ("trocr", "TrOCRForCausalLM"),
495
+ ("whisper", "WhisperForCausalLM"),
496
+ ("xglm", "XGLMForCausalLM"),
497
+ ("xlm", "XLMWithLMHeadModel"),
498
+ ("xlm-prophetnet", "XLMProphetNetForCausalLM"),
499
+ ("xlm-roberta", "XLMRobertaForCausalLM"),
500
+ ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"),
501
+ ("xlnet", "XLNetLMHeadModel"),
502
+ ("xmod", "XmodForCausalLM"),
503
+ ]
504
+ )
505
+
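
Likewise illustrative rather than part of the committed file: the causal-LM mapping above is what backs `AutoModelForCausalLM`, shown here with "gpt2" as an example checkpoint id.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")   # "gpt2" model type -> GPT2LMHeadModel via this mapping
inputs = tokenizer("Hello", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0]))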
506
+ MODEL_FOR_IMAGE_MAPPING_NAMES = OrderedDict(
507
+ [
508
+ # Model for Image mapping
509
+ ("beit", "BeitModel"),
510
+ ("bit", "BitModel"),
511
+ ("conditional_detr", "ConditionalDetrModel"),
512
+ ("convnext", "ConvNextModel"),
513
+ ("convnextv2", "ConvNextV2Model"),
514
+ ("data2vec-vision", "Data2VecVisionModel"),
515
+ ("deformable_detr", "DeformableDetrModel"),
516
+ ("deit", "DeiTModel"),
517
+ ("deta", "DetaModel"),
518
+ ("detr", "DetrModel"),
519
+ ("dinat", "DinatModel"),
520
+ ("dinov2", "Dinov2Model"),
521
+ ("dpt", "DPTModel"),
522
+ ("efficientformer", "EfficientFormerModel"),
523
+ ("efficientnet", "EfficientNetModel"),
524
+ ("focalnet", "FocalNetModel"),
525
+ ("glpn", "GLPNModel"),
526
+ ("imagegpt", "ImageGPTModel"),
527
+ ("levit", "LevitModel"),
528
+ ("mobilenet_v1", "MobileNetV1Model"),
529
+ ("mobilenet_v2", "MobileNetV2Model"),
530
+ ("mobilevit", "MobileViTModel"),
531
+ ("mobilevitv2", "MobileViTV2Model"),
532
+ ("nat", "NatModel"),
533
+ ("poolformer", "PoolFormerModel"),
534
+ ("pvt", "PvtModel"),
535
+ ("regnet", "RegNetModel"),
536
+ ("resnet", "ResNetModel"),
537
+ ("segformer", "SegformerModel"),
538
+ ("siglip_vision_model", "SiglipVisionModel"),
539
+ ("swiftformer", "SwiftFormerModel"),
540
+ ("swin", "SwinModel"),
541
+ ("swin2sr", "Swin2SRModel"),
542
+ ("swinv2", "Swinv2Model"),
543
+ ("table-transformer", "TableTransformerModel"),
544
+ ("timesformer", "TimesformerModel"),
545
+ ("timm_backbone", "TimmBackbone"),
546
+ ("van", "VanModel"),
547
+ ("videomae", "VideoMAEModel"),
548
+ ("vit", "ViTModel"),
549
+ ("vit_hybrid", "ViTHybridModel"),
550
+ ("vit_mae", "ViTMAEModel"),
551
+ ("vit_msn", "ViTMSNModel"),
552
+ ("vitdet", "VitDetModel"),
553
+ ("vivit", "VivitModel"),
554
+ ("yolos", "YolosModel"),
555
+ ]
556
+ )
557
+
558
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
559
+ [
560
+ ("deit", "DeiTForMaskedImageModeling"),
561
+ ("focalnet", "FocalNetForMaskedImageModeling"),
562
+ ("swin", "SwinForMaskedImageModeling"),
563
+ ("swinv2", "Swinv2ForMaskedImageModeling"),
564
+ ("vit", "ViTForMaskedImageModeling"),
565
+ ]
566
+ )
567
+
568
+
569
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
570
+ # Model for Causal Image Modeling mapping
571
+ [
572
+ ("imagegpt", "ImageGPTForCausalImageModeling"),
573
+ ]
574
+ )
575
+
576
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
577
+ [
578
+ # Model for Image Classification mapping
579
+ ("beit", "BeitForImageClassification"),
580
+ ("bit", "BitForImageClassification"),
581
+ ("clip", "CLIPForImageClassification"),
582
+ ("convnext", "ConvNextForImageClassification"),
583
+ ("convnextv2", "ConvNextV2ForImageClassification"),
584
+ ("cvt", "CvtForImageClassification"),
585
+ ("data2vec-vision", "Data2VecVisionForImageClassification"),
586
+ (
587
+ "deit",
588
+ ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher"),
589
+ ),
590
+ ("dinat", "DinatForImageClassification"),
591
+ ("dinov2", "Dinov2ForImageClassification"),
592
+ (
593
+ "efficientformer",
594
+ (
595
+ "EfficientFormerForImageClassification",
596
+ "EfficientFormerForImageClassificationWithTeacher",
597
+ ),
598
+ ),
599
+ ("efficientnet", "EfficientNetForImageClassification"),
600
+ ("focalnet", "FocalNetForImageClassification"),
601
+ ("imagegpt", "ImageGPTForImageClassification"),
602
+ (
603
+ "levit",
604
+ ("LevitForImageClassification", "LevitForImageClassificationWithTeacher"),
605
+ ),
606
+ ("mobilenet_v1", "MobileNetV1ForImageClassification"),
607
+ ("mobilenet_v2", "MobileNetV2ForImageClassification"),
608
+ ("mobilevit", "MobileViTForImageClassification"),
609
+ ("mobilevitv2", "MobileViTV2ForImageClassification"),
610
+ ("nat", "NatForImageClassification"),
611
+ (
612
+ "perceiver",
613
+ (
614
+ "PerceiverForImageClassificationLearned",
615
+ "PerceiverForImageClassificationFourier",
616
+ "PerceiverForImageClassificationConvProcessing",
617
+ ),
618
+ ),
619
+ ("poolformer", "PoolFormerForImageClassification"),
620
+ ("pvt", "PvtForImageClassification"),
621
+ ("pvt_v2", "PvtV2ForImageClassification"),
622
+ ("regnet", "RegNetForImageClassification"),
623
+ ("resnet", "ResNetForImageClassification"),
624
+ ("segformer", "SegformerForImageClassification"),
625
+ ("siglip", "SiglipForImageClassification"),
626
+ ("swiftformer", "SwiftFormerForImageClassification"),
627
+ ("swin", "SwinForImageClassification"),
628
+ ("swinv2", "Swinv2ForImageClassification"),
629
+ ("van", "VanForImageClassification"),
630
+ ("vit", "ViTForImageClassification"),
631
+ ("vit_hybrid", "ViTHybridForImageClassification"),
632
+ ("vit_msn", "ViTMSNForImageClassification"),
633
+ ]
634
+ )
635
+
636
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict(
637
+ [
638
+ # Do not add new models here, this class will be deprecated in the future.
639
+ # Model for Image Segmentation mapping
640
+ ("detr", "DetrForSegmentation"),
641
+ ]
642
+ )
643
+
644
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict(
645
+ [
646
+ # Model for Semantic Segmentation mapping
647
+ ("beit", "BeitForSemanticSegmentation"),
648
+ ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"),
649
+ ("dpt", "DPTForSemanticSegmentation"),
650
+ ("mobilenet_v2", "MobileNetV2ForSemanticSegmentation"),
651
+ ("mobilevit", "MobileViTForSemanticSegmentation"),
652
+ ("mobilevitv2", "MobileViTV2ForSemanticSegmentation"),
653
+ ("segformer", "SegformerForSemanticSegmentation"),
654
+ ("upernet", "UperNetForSemanticSegmentation"),
655
+ ]
656
+ )
657
+
658
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict(
659
+ [
660
+ # Model for Instance Segmentation mapping
661
+ # MaskFormerForInstanceSegmentation can be removed from this mapping in v5
662
+ ("maskformer", "MaskFormerForInstanceSegmentation"),
663
+ ]
664
+ )
665
+
666
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict(
667
+ [
668
+ # Model for Universal Segmentation mapping
669
+ ("detr", "DetrForSegmentation"),
670
+ ("mask2former", "Mask2FormerForUniversalSegmentation"),
671
+ ("maskformer", "MaskFormerForInstanceSegmentation"),
672
+ ("oneformer", "OneFormerForUniversalSegmentation"),
673
+ ]
674
+ )
675
+
676
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
677
+ [
678
+ ("timesformer", "TimesformerForVideoClassification"),
679
+ ("videomae", "VideoMAEForVideoClassification"),
680
+ ("vivit", "VivitForVideoClassification"),
681
+ ]
682
+ )
683
+
684
+ MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
685
+ [
686
+ ("blip", "BlipForConditionalGeneration"),
687
+ ("blip-2", "Blip2ForConditionalGeneration"),
688
+ ("git", "GitForCausalLM"),
689
+ ("idefics2", "Idefics2ForConditionalGeneration"),
690
+ ("instructblip", "InstructBlipForConditionalGeneration"),
691
+ ("kosmos-2", "Kosmos2ForConditionalGeneration"),
692
+ ("llava", "LlavaForConditionalGeneration"),
693
+ ("llava_next", "LlavaNextForConditionalGeneration"),
694
+ ("pix2struct", "Pix2StructForConditionalGeneration"),
695
+ ("vipllava", "VipLlavaForConditionalGeneration"),
696
+ ("vision-encoder-decoder", "VisionEncoderDecoderModel"),
697
+ ]
698
+ )
699
+
700
+ MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
701
+ [
702
+ # Model for Masked LM mapping
703
+ ("albert", "AlbertForMaskedLM"),
704
+ ("bart", "BartForConditionalGeneration"),
705
+ ("bert", "BertForMaskedLM"),
706
+ ("big_bird", "BigBirdForMaskedLM"),
707
+ ("camembert", "CamembertForMaskedLM"),
708
+ ("convbert", "ConvBertForMaskedLM"),
709
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
710
+ ("deberta", "DebertaForMaskedLM"),
711
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
712
+ ("distilbert", "DistilBertForMaskedLM"),
713
+ ("electra", "ElectraForMaskedLM"),
714
+ ("ernie", "ErnieForMaskedLM"),
715
+ ("esm", "EsmForMaskedLM"),
716
+ ("flaubert", "FlaubertWithLMHeadModel"),
717
+ ("fnet", "FNetForMaskedLM"),
718
+ ("funnel", "FunnelForMaskedLM"),
719
+ ("ibert", "IBertForMaskedLM"),
720
+ ("layoutlm", "LayoutLMForMaskedLM"),
721
+ ("longformer", "LongformerForMaskedLM"),
722
+ ("luke", "LukeForMaskedLM"),
723
+ ("mbart", "MBartForConditionalGeneration"),
724
+ ("mega", "MegaForMaskedLM"),
725
+ ("megatron-bert", "MegatronBertForMaskedLM"),
726
+ ("mobilebert", "MobileBertForMaskedLM"),
727
+ ("mpnet", "MPNetForMaskedLM"),
728
+ ("mra", "MraForMaskedLM"),
729
+ ("mvp", "MvpForConditionalGeneration"),
730
+ ("nezha", "NezhaForMaskedLM"),
731
+ ("nystromformer", "NystromformerForMaskedLM"),
732
+ ("perceiver", "PerceiverForMaskedLM"),
733
+ ("qdqbert", "QDQBertForMaskedLM"),
734
+ ("reformer", "ReformerForMaskedLM"),
735
+ ("rembert", "RemBertForMaskedLM"),
736
+ ("roberta", "RobertaForMaskedLM"),
737
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
738
+ ("roc_bert", "RoCBertForMaskedLM"),
739
+ ("roformer", "RoFormerForMaskedLM"),
740
+ ("squeezebert", "SqueezeBertForMaskedLM"),
741
+ ("tapas", "TapasForMaskedLM"),
742
+ ("wav2vec2", "Wav2Vec2ForMaskedLM"),
743
+ ("xlm", "XLMWithLMHeadModel"),
744
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
745
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
746
+ ("xmod", "XmodForMaskedLM"),
747
+ ("yoso", "YosoForMaskedLM"),
748
+ ]
749
+ )
750
+
751
+ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
752
+ [
753
+ # Model for Object Detection mapping
754
+ ("conditional_detr", "ConditionalDetrForObjectDetection"),
755
+ ("deformable_detr", "DeformableDetrForObjectDetection"),
756
+ ("deta", "DetaForObjectDetection"),
757
+ ("detr", "DetrForObjectDetection"),
758
+ ("table-transformer", "TableTransformerForObjectDetection"),
759
+ ("yolos", "YolosForObjectDetection"),
760
+ ]
761
+ )
762
+
763
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
764
+ [
765
+ # Model for Zero Shot Object Detection mapping
766
+ ("grounding-dino", "GroundingDinoForObjectDetection"),
767
+ ("owlv2", "Owlv2ForObjectDetection"),
768
+ ("owlvit", "OwlViTForObjectDetection"),
769
+ ]
770
+ )
771
+
772
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict(
773
+ [
774
+ # Model for depth estimation mapping
775
+ ("depth_anything", "DepthAnythingForDepthEstimation"),
776
+ ("dpt", "DPTForDepthEstimation"),
777
+ ("glpn", "GLPNForDepthEstimation"),
778
+ ]
779
+ )
780
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
781
+ [
782
+ # Model for Seq2Seq Causal LM mapping
783
+ ("bart", "BartForConditionalGeneration"),
784
+ ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"),
785
+ ("blenderbot", "BlenderbotForConditionalGeneration"),
786
+ ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"),
787
+ ("encoder-decoder", "EncoderDecoderModel"),
788
+ ("fsmt", "FSMTForConditionalGeneration"),
789
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
790
+ ("led", "LEDForConditionalGeneration"),
791
+ ("longt5", "LongT5ForConditionalGeneration"),
792
+ ("m2m_100", "M2M100ForConditionalGeneration"),
793
+ ("marian", "MarianMTModel"),
794
+ ("mbart", "MBartForConditionalGeneration"),
795
+ ("mt5", "MT5ForConditionalGeneration"),
796
+ ("mvp", "MvpForConditionalGeneration"),
797
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
798
+ ("pegasus", "PegasusForConditionalGeneration"),
799
+ ("pegasus_x", "PegasusXForConditionalGeneration"),
800
+ ("plbart", "PLBartForConditionalGeneration"),
801
+ ("prophetnet", "ProphetNetForConditionalGeneration"),
802
+ ("seamless_m4t", "SeamlessM4TForTextToText"),
803
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToText"),
804
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
805
+ ("t5", "T5ForConditionalGeneration"),
806
+ ("umt5", "UMT5ForConditionalGeneration"),
807
+ ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"),
808
+ ]
809
+ )
810
+
811
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
812
+ [
813
+ ("pop2piano", "Pop2PianoForConditionalGeneration"),
814
+ ("seamless_m4t", "SeamlessM4TForSpeechToText"),
815
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForSpeechToText"),
816
+ ("speech-encoder-decoder", "SpeechEncoderDecoderModel"),
817
+ ("speech_to_text", "Speech2TextForConditionalGeneration"),
818
+ ("speecht5", "SpeechT5ForSpeechToText"),
819
+ ("whisper", "WhisperForConditionalGeneration"),
820
+ ]
821
+ )
822
+
823
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
824
+ [
825
+ # Model for Sequence Classification mapping
826
+ ("albert", "AlbertForSequenceClassification"),
827
+ ("bart", "BartForSequenceClassification"),
828
+ ("bert", "BertForSequenceClassification"),
829
+ ("big_bird", "BigBirdForSequenceClassification"),
830
+ ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"),
831
+ ("biogpt", "BioGptForSequenceClassification"),
832
+ ("bloom", "BloomForSequenceClassification"),
833
+ ("camembert", "CamembertForSequenceClassification"),
834
+ ("canine", "CanineForSequenceClassification"),
835
+ ("code_llama", "LlamaForSequenceClassification"),
836
+ ("convbert", "ConvBertForSequenceClassification"),
837
+ ("ctrl", "CTRLForSequenceClassification"),
838
+ ("data2vec-text", "Data2VecTextForSequenceClassification"),
839
+ ("deberta", "DebertaForSequenceClassification"),
840
+ ("deberta-v2", "DebertaV2ForSequenceClassification"),
841
+ ("distilbert", "DistilBertForSequenceClassification"),
842
+ ("electra", "ElectraForSequenceClassification"),
843
+ ("ernie", "ErnieForSequenceClassification"),
844
+ ("ernie_m", "ErnieMForSequenceClassification"),
845
+ ("esm", "EsmForSequenceClassification"),
846
+ ("falcon", "FalconForSequenceClassification"),
847
+ ("flaubert", "FlaubertForSequenceClassification"),
848
+ ("fnet", "FNetForSequenceClassification"),
849
+ ("funnel", "FunnelForSequenceClassification"),
850
+ ("gemma", "GemmaForSequenceClassification"),
851
+ ("gpt-sw3", "GPT2ForSequenceClassification"),
852
+ ("gpt2", "GPT2ForSequenceClassification"),
853
+ ("gpt_bigcode", "GPTBigCodeForSequenceClassification"),
854
+ ("gpt_neo", "GPTNeoForSequenceClassification"),
855
+ ("gpt_neox", "GPTNeoXForSequenceClassification"),
856
+ ("gptj", "GPTJForSequenceClassification"),
857
+ ("ibert", "IBertForSequenceClassification"),
858
+ ("jamba", "JambaForSequenceClassification"),
859
+ ("layoutlm", "LayoutLMForSequenceClassification"),
860
+ ("layoutlmv2", "LayoutLMv2ForSequenceClassification"),
861
+ ("layoutlmv3", "LayoutLMv3ForSequenceClassification"),
862
+ ("led", "LEDForSequenceClassification"),
863
+ ("lilt", "LiltForSequenceClassification"),
864
+ ("llama", "LlamaForSequenceClassification"),
865
+ ("longformer", "LongformerForSequenceClassification"),
866
+ ("luke", "LukeForSequenceClassification"),
867
+ ("markuplm", "MarkupLMForSequenceClassification"),
868
+ ("mbart", "MBartForSequenceClassification"),
869
+ ("mega", "MegaForSequenceClassification"),
870
+ ("megatron-bert", "MegatronBertForSequenceClassification"),
871
+ ("mistral", "MistralForSequenceClassification"),
872
+ ("mixtral", "MixtralForSequenceClassification"),
873
+ ("mobilebert", "MobileBertForSequenceClassification"),
874
+ ("mpnet", "MPNetForSequenceClassification"),
875
+ ("mpt", "MptForSequenceClassification"),
876
+ ("mra", "MraForSequenceClassification"),
877
+ ("mt5", "MT5ForSequenceClassification"),
878
+ ("mvp", "MvpForSequenceClassification"),
879
+ ("nezha", "NezhaForSequenceClassification"),
880
+ ("nystromformer", "NystromformerForSequenceClassification"),
881
+ ("open-llama", "OpenLlamaForSequenceClassification"),
882
+ ("openai-gpt", "OpenAIGPTForSequenceClassification"),
883
+ ("opt", "OPTForSequenceClassification"),
884
+ ("perceiver", "PerceiverForSequenceClassification"),
885
+ ("persimmon", "PersimmonForSequenceClassification"),
886
+ ("phi", "PhiForSequenceClassification"),
887
+ ("plbart", "PLBartForSequenceClassification"),
888
+ ("qdqbert", "QDQBertForSequenceClassification"),
889
+ ("qwen2", "Qwen2ForSequenceClassification"),
890
+ ("qwen2_moe", "Qwen2MoeForSequenceClassification"),
891
+ ("reformer", "ReformerForSequenceClassification"),
892
+ ("rembert", "RemBertForSequenceClassification"),
893
+ ("roberta", "RobertaForSequenceClassification"),
894
+ ("roberta-prelayernorm", "RobertaPreLayerNormForSequenceClassification"),
895
+ ("roc_bert", "RoCBertForSequenceClassification"),
896
+ ("roformer", "RoFormerForSequenceClassification"),
897
+ ("squeezebert", "SqueezeBertForSequenceClassification"),
898
+ ("stablelm", "StableLmForSequenceClassification"),
899
+ ("starcoder2", "Starcoder2ForSequenceClassification"),
900
+ ("t5", "T5ForSequenceClassification"),
901
+ ("tapas", "TapasForSequenceClassification"),
902
+ ("transfo-xl", "TransfoXLForSequenceClassification"),
903
+ ("umt5", "UMT5ForSequenceClassification"),
904
+ ("xlm", "XLMForSequenceClassification"),
905
+ ("xlm-roberta", "XLMRobertaForSequenceClassification"),
906
+ ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"),
907
+ ("xlnet", "XLNetForSequenceClassification"),
908
+ ("xmod", "XmodForSequenceClassification"),
909
+ ("yoso", "YosoForSequenceClassification"),
910
+ ]
911
+ )
912
+
913
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
914
+ [
915
+ # Model for Question Answering mapping
916
+ ("albert", "AlbertForQuestionAnswering"),
917
+ ("bart", "BartForQuestionAnswering"),
918
+ ("bert", "BertForQuestionAnswering"),
919
+ ("big_bird", "BigBirdForQuestionAnswering"),
920
+ ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"),
921
+ ("bloom", "BloomForQuestionAnswering"),
922
+ ("camembert", "CamembertForQuestionAnswering"),
923
+ ("canine", "CanineForQuestionAnswering"),
924
+ ("convbert", "ConvBertForQuestionAnswering"),
925
+ ("data2vec-text", "Data2VecTextForQuestionAnswering"),
926
+ ("deberta", "DebertaForQuestionAnswering"),
927
+ ("deberta-v2", "DebertaV2ForQuestionAnswering"),
928
+ ("distilbert", "DistilBertForQuestionAnswering"),
929
+ ("electra", "ElectraForQuestionAnswering"),
930
+ ("ernie", "ErnieForQuestionAnswering"),
931
+ ("ernie_m", "ErnieMForQuestionAnswering"),
932
+ ("falcon", "FalconForQuestionAnswering"),
933
+ ("flaubert", "FlaubertForQuestionAnsweringSimple"),
934
+ ("fnet", "FNetForQuestionAnswering"),
935
+ ("funnel", "FunnelForQuestionAnswering"),
936
+ ("gpt2", "GPT2ForQuestionAnswering"),
937
+ ("gpt_neo", "GPTNeoForQuestionAnswering"),
938
+ ("gpt_neox", "GPTNeoXForQuestionAnswering"),
939
+ ("gptj", "GPTJForQuestionAnswering"),
940
+ ("ibert", "IBertForQuestionAnswering"),
941
+ ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
942
+ ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
943
+ ("led", "LEDForQuestionAnswering"),
944
+ ("lilt", "LiltForQuestionAnswering"),
945
+ ("llama", "LlamaForQuestionAnswering"),
946
+ ("longformer", "LongformerForQuestionAnswering"),
947
+ ("luke", "LukeForQuestionAnswering"),
948
+ ("lxmert", "LxmertForQuestionAnswering"),
949
+ ("markuplm", "MarkupLMForQuestionAnswering"),
950
+ ("mbart", "MBartForQuestionAnswering"),
951
+ ("mega", "MegaForQuestionAnswering"),
952
+ ("megatron-bert", "MegatronBertForQuestionAnswering"),
953
+ ("mobilebert", "MobileBertForQuestionAnswering"),
954
+ ("mpnet", "MPNetForQuestionAnswering"),
955
+ ("mpt", "MptForQuestionAnswering"),
956
+ ("mra", "MraForQuestionAnswering"),
957
+ ("mt5", "MT5ForQuestionAnswering"),
958
+ ("mvp", "MvpForQuestionAnswering"),
959
+ ("nezha", "NezhaForQuestionAnswering"),
960
+ ("nystromformer", "NystromformerForQuestionAnswering"),
961
+ ("opt", "OPTForQuestionAnswering"),
962
+ ("qdqbert", "QDQBertForQuestionAnswering"),
963
+ ("reformer", "ReformerForQuestionAnswering"),
964
+ ("rembert", "RemBertForQuestionAnswering"),
965
+ ("roberta", "RobertaForQuestionAnswering"),
966
+ ("roberta-prelayernorm", "RobertaPreLayerNormForQuestionAnswering"),
967
+ ("roc_bert", "RoCBertForQuestionAnswering"),
968
+ ("roformer", "RoFormerForQuestionAnswering"),
969
+ ("splinter", "SplinterForQuestionAnswering"),
970
+ ("squeezebert", "SqueezeBertForQuestionAnswering"),
971
+ ("t5", "T5ForQuestionAnswering"),
972
+ ("umt5", "UMT5ForQuestionAnswering"),
973
+ ("xlm", "XLMForQuestionAnsweringSimple"),
974
+ ("xlm-roberta", "XLMRobertaForQuestionAnswering"),
975
+ ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"),
976
+ ("xlnet", "XLNetForQuestionAnsweringSimple"),
977
+ ("xmod", "XmodForQuestionAnswering"),
978
+ ("yoso", "YosoForQuestionAnswering"),
979
+ ]
980
+ )
981
+
982
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
983
+ [
984
+ # Model for Table Question Answering mapping
985
+ ("tapas", "TapasForQuestionAnswering"),
986
+ ]
987
+ )
988
+
989
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
990
+ [
991
+ ("blip", "BlipForQuestionAnswering"),
992
+ ("blip-2", "Blip2ForConditionalGeneration"),
993
+ ("vilt", "ViltForQuestionAnswering"),
994
+ ]
995
+ )
996
+
997
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
998
+ [
999
+ ("layoutlm", "LayoutLMForQuestionAnswering"),
1000
+ ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
1001
+ ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
1002
+ ]
1003
+ )
1004
+
1005
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1006
+ [
1007
+ # Model for Token Classification mapping
1008
+ ("albert", "AlbertForTokenClassification"),
1009
+ ("bert", "BertForTokenClassification"),
1010
+ ("big_bird", "BigBirdForTokenClassification"),
1011
+ ("biogpt", "BioGptForTokenClassification"),
1012
+ ("bloom", "BloomForTokenClassification"),
1013
+ ("bros", "BrosForTokenClassification"),
1014
+ ("camembert", "CamembertForTokenClassification"),
1015
+ ("canine", "CanineForTokenClassification"),
1016
+ ("convbert", "ConvBertForTokenClassification"),
1017
+ ("data2vec-text", "Data2VecTextForTokenClassification"),
1018
+ ("deberta", "DebertaForTokenClassification"),
1019
+ ("deberta-v2", "DebertaV2ForTokenClassification"),
1020
+ ("distilbert", "DistilBertForTokenClassification"),
1021
+ ("electra", "ElectraForTokenClassification"),
1022
+ ("ernie", "ErnieForTokenClassification"),
1023
+ ("ernie_m", "ErnieMForTokenClassification"),
1024
+ ("esm", "EsmForTokenClassification"),
1025
+ ("falcon", "FalconForTokenClassification"),
1026
+ ("flaubert", "FlaubertForTokenClassification"),
1027
+ ("fnet", "FNetForTokenClassification"),
1028
+ ("funnel", "FunnelForTokenClassification"),
1029
+ ("gpt-sw3", "GPT2ForTokenClassification"),
1030
+ ("gpt2", "GPT2ForTokenClassification"),
1031
+ ("gpt_bigcode", "GPTBigCodeForTokenClassification"),
1032
+ ("gpt_neo", "GPTNeoForTokenClassification"),
1033
+ ("gpt_neox", "GPTNeoXForTokenClassification"),
1034
+ ("ibert", "IBertForTokenClassification"),
1035
+ ("layoutlm", "LayoutLMForTokenClassification"),
1036
+ ("layoutlmv2", "LayoutLMv2ForTokenClassification"),
1037
+ ("layoutlmv3", "LayoutLMv3ForTokenClassification"),
1038
+ ("lilt", "LiltForTokenClassification"),
1039
+ ("longformer", "LongformerForTokenClassification"),
1040
+ ("luke", "LukeForTokenClassification"),
1041
+ ("markuplm", "MarkupLMForTokenClassification"),
1042
+ ("mega", "MegaForTokenClassification"),
1043
+ ("megatron-bert", "MegatronBertForTokenClassification"),
1044
+ ("mobilebert", "MobileBertForTokenClassification"),
1045
+ ("mpnet", "MPNetForTokenClassification"),
1046
+ ("mpt", "MptForTokenClassification"),
1047
+ ("mra", "MraForTokenClassification"),
1048
+ ("mt5", "MT5ForTokenClassification"),
1049
+ ("nezha", "NezhaForTokenClassification"),
1050
+ ("nystromformer", "NystromformerForTokenClassification"),
1051
+ ("phi", "PhiForTokenClassification"),
1052
+ ("qdqbert", "QDQBertForTokenClassification"),
1053
+ ("rembert", "RemBertForTokenClassification"),
1054
+ ("roberta", "RobertaForTokenClassification"),
1055
+ ("roberta-prelayernorm", "RobertaPreLayerNormForTokenClassification"),
1056
+ ("roc_bert", "RoCBertForTokenClassification"),
1057
+ ("roformer", "RoFormerForTokenClassification"),
1058
+ ("squeezebert", "SqueezeBertForTokenClassification"),
1059
+ ("t5", "T5ForTokenClassification"),
1060
+ ("umt5", "UMT5ForTokenClassification"),
1061
+ ("xlm", "XLMForTokenClassification"),
1062
+ ("xlm-roberta", "XLMRobertaForTokenClassification"),
1063
+ ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"),
1064
+ ("xlnet", "XLNetForTokenClassification"),
1065
+ ("xmod", "XmodForTokenClassification"),
1066
+ ("yoso", "YosoForTokenClassification"),
1067
+ ]
1068
+ )
1069
+
1070
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
1071
+ [
1072
+ # Model for Multiple Choice mapping
1073
+ ("albert", "AlbertForMultipleChoice"),
1074
+ ("bert", "BertForMultipleChoice"),
1075
+ ("big_bird", "BigBirdForMultipleChoice"),
1076
+ ("camembert", "CamembertForMultipleChoice"),
1077
+ ("canine", "CanineForMultipleChoice"),
1078
+ ("convbert", "ConvBertForMultipleChoice"),
1079
+ ("data2vec-text", "Data2VecTextForMultipleChoice"),
1080
+ ("deberta-v2", "DebertaV2ForMultipleChoice"),
1081
+ ("distilbert", "DistilBertForMultipleChoice"),
1082
+ ("electra", "ElectraForMultipleChoice"),
1083
+ ("ernie", "ErnieForMultipleChoice"),
1084
+ ("ernie_m", "ErnieMForMultipleChoice"),
1085
+ ("flaubert", "FlaubertForMultipleChoice"),
1086
+ ("fnet", "FNetForMultipleChoice"),
1087
+ ("funnel", "FunnelForMultipleChoice"),
1088
+ ("ibert", "IBertForMultipleChoice"),
1089
+ ("longformer", "LongformerForMultipleChoice"),
1090
+ ("luke", "LukeForMultipleChoice"),
1091
+ ("mega", "MegaForMultipleChoice"),
1092
+ ("megatron-bert", "MegatronBertForMultipleChoice"),
1093
+ ("mobilebert", "MobileBertForMultipleChoice"),
1094
+ ("mpnet", "MPNetForMultipleChoice"),
1095
+ ("mra", "MraForMultipleChoice"),
1096
+ ("nezha", "NezhaForMultipleChoice"),
1097
+ ("nystromformer", "NystromformerForMultipleChoice"),
1098
+ ("qdqbert", "QDQBertForMultipleChoice"),
1099
+ ("rembert", "RemBertForMultipleChoice"),
1100
+ ("roberta", "RobertaForMultipleChoice"),
1101
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMultipleChoice"),
1102
+ ("roc_bert", "RoCBertForMultipleChoice"),
1103
+ ("roformer", "RoFormerForMultipleChoice"),
1104
+ ("squeezebert", "SqueezeBertForMultipleChoice"),
1105
+ ("xlm", "XLMForMultipleChoice"),
1106
+ ("xlm-roberta", "XLMRobertaForMultipleChoice"),
1107
+ ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"),
1108
+ ("xlnet", "XLNetForMultipleChoice"),
1109
+ ("xmod", "XmodForMultipleChoice"),
1110
+ ("yoso", "YosoForMultipleChoice"),
1111
+ ]
1112
+ )
1113
+
1114
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
1115
+ [
1116
+ ("bert", "BertForNextSentencePrediction"),
1117
+ ("ernie", "ErnieForNextSentencePrediction"),
1118
+ ("fnet", "FNetForNextSentencePrediction"),
1119
+ ("megatron-bert", "MegatronBertForNextSentencePrediction"),
1120
+ ("mobilebert", "MobileBertForNextSentencePrediction"),
1121
+ ("nezha", "NezhaForNextSentencePrediction"),
1122
+ ("qdqbert", "QDQBertForNextSentencePrediction"),
1123
+ ]
1124
+ )
1125
+
1126
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1127
+ [
1128
+ # Model for Audio Classification mapping
1129
+ ("audio-spectrogram-transformer", "ASTForAudioClassification"),
1130
+ ("data2vec-audio", "Data2VecAudioForSequenceClassification"),
1131
+ ("hubert", "HubertForSequenceClassification"),
1132
+ ("sew", "SEWForSequenceClassification"),
1133
+ ("sew-d", "SEWDForSequenceClassification"),
1134
+ ("unispeech", "UniSpeechForSequenceClassification"),
1135
+ ("unispeech-sat", "UniSpeechSatForSequenceClassification"),
1136
+ ("wav2vec2", "Wav2Vec2ForSequenceClassification"),
1137
+ ("wav2vec2-bert", "Wav2Vec2BertForSequenceClassification"),
1138
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"),
1139
+ ("wavlm", "WavLMForSequenceClassification"),
1140
+ ("whisper", "WhisperForAudioClassification"),
1141
+ ]
1142
+ )
1143
+
1144
+ MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict(
1145
+ [
1146
+ # Model for Connectionist temporal classification (CTC) mapping
1147
+ ("data2vec-audio", "Data2VecAudioForCTC"),
1148
+ ("hubert", "HubertForCTC"),
1149
+ ("mctct", "MCTCTForCTC"),
1150
+ ("sew", "SEWForCTC"),
1151
+ ("sew-d", "SEWDForCTC"),
1152
+ ("unispeech", "UniSpeechForCTC"),
1153
+ ("unispeech-sat", "UniSpeechSatForCTC"),
1154
+ ("wav2vec2", "Wav2Vec2ForCTC"),
1155
+ ("wav2vec2-bert", "Wav2Vec2BertForCTC"),
1156
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"),
1157
+ ("wavlm", "WavLMForCTC"),
1158
+ ]
1159
+ )
1160
+
1161
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1162
+ [
1163
+ # Model for Audio Classification mapping
1164
+ ("data2vec-audio", "Data2VecAudioForAudioFrameClassification"),
1165
+ ("unispeech-sat", "UniSpeechSatForAudioFrameClassification"),
1166
+ ("wav2vec2", "Wav2Vec2ForAudioFrameClassification"),
1167
+ ("wav2vec2-bert", "Wav2Vec2BertForAudioFrameClassification"),
1168
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForAudioFrameClassification"),
1169
+ ("wavlm", "WavLMForAudioFrameClassification"),
1170
+ ]
1171
+ )
1172
+
1173
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict(
1174
+ [
1175
+ # Model for Audio Classification mapping
1176
+ ("data2vec-audio", "Data2VecAudioForXVector"),
1177
+ ("unispeech-sat", "UniSpeechSatForXVector"),
1178
+ ("wav2vec2", "Wav2Vec2ForXVector"),
1179
+ ("wav2vec2-bert", "Wav2Vec2BertForXVector"),
1180
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForXVector"),
1181
+ ("wavlm", "WavLMForXVector"),
1182
+ ]
1183
+ )
1184
+
1185
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = OrderedDict(
1186
+ [
1187
+ # Model for Text-To-Spectrogram mapping
1188
+ ("fastspeech2_conformer", "FastSpeech2ConformerModel"),
1189
+ ("speecht5", "SpeechT5ForTextToSpeech"),
1190
+ ]
1191
+ )
1192
+
1193
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = OrderedDict(
1194
+ [
1195
+ # Model for Text-To-Waveform mapping
1196
+ ("bark", "BarkModel"),
1197
+ ("fastspeech2_conformer", "FastSpeech2ConformerWithHifiGan"),
1198
+ ("musicgen", "MusicgenForConditionalGeneration"),
1199
+ ("musicgen_melody", "MusicgenMelodyForConditionalGeneration"),
1200
+ ("seamless_m4t", "SeamlessM4TForTextToSpeech"),
1201
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToSpeech"),
1202
+ ("vits", "VitsModel"),
1203
+ ]
1204
+ )
1205
+
1206
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1207
+ [
1208
+ # Model for Zero Shot Image Classification mapping
1209
+ ("align", "AlignModel"),
1210
+ ("altclip", "AltCLIPModel"),
1211
+ ("blip", "BlipModel"),
1212
+ ("chinese_clip", "ChineseCLIPModel"),
1213
+ ("clip", "CLIPModel"),
1214
+ ("clipseg", "CLIPSegModel"),
1215
+ ("siglip", "SiglipModel"),
1216
+ ]
1217
+ )
1218
+
1219
+ MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict(
1220
+ [
1221
+ # Backbone mapping
1222
+ ("beit", "BeitBackbone"),
1223
+ ("bit", "BitBackbone"),
1224
+ ("convnext", "ConvNextBackbone"),
1225
+ ("convnextv2", "ConvNextV2Backbone"),
1226
+ ("dinat", "DinatBackbone"),
1227
+ ("dinov2", "Dinov2Backbone"),
1228
+ ("focalnet", "FocalNetBackbone"),
1229
+ ("maskformer-swin", "MaskFormerSwinBackbone"),
1230
+ ("nat", "NatBackbone"),
1231
+ ("pvt_v2", "PvtV2Backbone"),
1232
+ ("resnet", "ResNetBackbone"),
1233
+ ("swin", "SwinBackbone"),
1234
+ ("swinv2", "Swinv2Backbone"),
1235
+ ("timm_backbone", "TimmBackbone"),
1236
+ ("vitdet", "VitDetBackbone"),
1237
+ ]
1238
+ )
1239
+
1240
+ MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict(
1241
+ [
1242
+ ("sam", "SamModel"),
1243
+ ]
1244
+ )
1245
+
1246
+
1247
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES = OrderedDict(
1248
+ [
1249
+ ("superpoint", "SuperPointForKeypointDetection"),
1250
+ ]
1251
+ )
1252
+
1253
+
1254
+ MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict(
1255
+ [
1256
+ ("albert", "AlbertModel"),
1257
+ ("bert", "BertModel"),
1258
+ ("big_bird", "BigBirdModel"),
1259
+ ("data2vec-text", "Data2VecTextModel"),
1260
+ ("deberta", "DebertaModel"),
1261
+ ("deberta-v2", "DebertaV2Model"),
1262
+ ("distilbert", "DistilBertModel"),
1263
+ ("electra", "ElectraModel"),
1264
+ ("flaubert", "FlaubertModel"),
1265
+ ("ibert", "IBertModel"),
1266
+ ("longformer", "LongformerModel"),
1267
+ ("mobilebert", "MobileBertModel"),
1268
+ ("mt5", "MT5EncoderModel"),
1269
+ ("nystromformer", "NystromformerModel"),
1270
+ ("reformer", "ReformerModel"),
1271
+ ("rembert", "RemBertModel"),
1272
+ ("roberta", "RobertaModel"),
1273
+ ("roberta-prelayernorm", "RobertaPreLayerNormModel"),
1274
+ ("roc_bert", "RoCBertModel"),
1275
+ ("roformer", "RoFormerModel"),
1276
+ ("squeezebert", "SqueezeBertModel"),
1277
+ ("t5", "T5EncoderModel"),
1278
+ ("umt5", "UMT5EncoderModel"),
1279
+ ("xlm", "XLMModel"),
1280
+ ("xlm-roberta", "XLMRobertaModel"),
1281
+ ("xlm-roberta-xl", "XLMRobertaXLModel"),
1282
+ ]
1283
+ )
1284
+
1285
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1286
+ [
1287
+ ("patchtsmixer", "PatchTSMixerForTimeSeriesClassification"),
1288
+ ("patchtst", "PatchTSTForClassification"),
1289
+ ]
1290
+ )
1291
+
1292
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict(
1293
+ [
1294
+ ("patchtsmixer", "PatchTSMixerForRegression"),
1295
+ ("patchtst", "PatchTSTForRegression"),
1296
+ ]
1297
+ )
1298
+
1299
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict(
1300
+ [
1301
+ ("swin2sr", "Swin2SRForImageSuperResolution"),
1302
+ ]
1303
+ )
1304
+
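
One last illustrative note (again not part of the committed file): each `*_MAPPING_NAMES` dict above is wrapped in `_LazyAutoMapping` just below, which behaves like a dict keyed by config classes and only imports a model class when it is first accessed. Since `_LazyAutoMapping` is an internal helper, treat this lookup as a sketch of the behavior:

from transformers import GPT2Config
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING

model_cls = MODEL_FOR_CAUSAL_LM_MAPPING[GPT2Config]  # lazily imports and returns GPT2LMHeadModel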
1305
+ MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
1306
+ MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES)
1307
+ MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES)
1308
+ MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
1309
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
1310
+ CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES
1311
+ )
1312
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1313
+ CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
1314
+ )
1315
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1316
+ CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
1317
+ )
1318
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping(
1319
+ CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES
1320
+ )
1321
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(
1322
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES
1323
+ )
1324
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping(
1325
+ CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES
1326
+ )
1327
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping(
1328
+ CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES
1329
+ )
1330
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1331
+ CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES
1332
+ )
1333
+ MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
1334
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1335
+ CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
1336
+ )
1337
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1338
+ CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
1339
+ )
1340
+ MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES)
1341
+ MODEL_FOR_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_MAPPING_NAMES)
1342
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
1343
+ CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
1344
+ )
1345
+ MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES)
1346
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(
1347
+ CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES
1348
+ )
1349
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
1350
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
1351
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
1352
+ )
1353
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1354
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
1355
+ )
1356
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1357
+ CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
1358
+ )
1359
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1360
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
1361
+ )
1362
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1363
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
1364
+ )
1365
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES)
1366
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
1367
+ CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
1368
+ )
1369
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1370
+ CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
1371
+ )
1372
+ MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES)
1373
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES)
1374
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1375
+ CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES
1376
+ )
1377
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES)
1378
+
1379
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = _LazyAutoMapping(
1380
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES
1381
+ )
1382
+
1383
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES)
1384
+
1385
+ MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES)
1386
+
1387
+ MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASK_GENERATION_MAPPING_NAMES)
1388
+
1389
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING = _LazyAutoMapping(
1390
+ CONFIG_MAPPING_NAMES, MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES
1391
+ )
1392
+
1393
+ MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES)
1394
+
1395
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1396
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES
1397
+ )
1398
+
1399
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping(
1400
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES
1401
+ )
1402
+
1403
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)
1404
+
1405
+
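The `_LazyAutoMapping` objects instantiated above behave like read-only dictionaries keyed by config classes, resolving and importing the concrete model class only on first access. A minimal sketch of that behavior follows (illustrative only, assuming PyTorch is installed; `BertConfig` is just one of the many registered config classes):

# Sketch only: _LazyAutoMapping maps config classes to model classes lazily.
from transformers import BertConfig
from transformers.models.auto.modeling_auto import MODEL_FOR_MASKED_LM_MAPPING, MODEL_MAPPING

print(BertConfig in MODEL_MAPPING)                       # True: "bert" is registered in MODEL_MAPPING_NAMES
print(MODEL_MAPPING[BertConfig].__name__)                # "BertModel", imported on demand
print(MODEL_FOR_MASKED_LM_MAPPING[BertConfig].__name__)  # "BertForMaskedLM"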
1406
+ class AutoModelForMaskGeneration(_BaseAutoModelClass):
1407
+ _model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING
1408
+
1409
+
1410
+ class AutoModelForKeypointDetection(_BaseAutoModelClass):
1411
+ _model_mapping = MODEL_FOR_KEYPOINT_DETECTION_MAPPING
1412
+
1413
+
1414
+ class AutoModelForTextEncoding(_BaseAutoModelClass):
1415
+ _model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING
1416
+
1417
+
1418
+ class AutoModelForImageToImage(_BaseAutoModelClass):
1419
+ _model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING
1420
+
1421
+
1422
+ class AutoModel(_BaseAutoModelClass):
1423
+ _model_mapping = MODEL_MAPPING
1424
+
1425
+
1426
+ AutoModel = auto_class_update(AutoModel)
1427
+
1428
+
1429
+ class AutoModelForPreTraining(_BaseAutoModelClass):
1430
+ _model_mapping = MODEL_FOR_PRETRAINING_MAPPING
1431
+
1432
+
1433
+ AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining")
1434
+
1435
+
1436
+ # Private on purpose, the public class will add the deprecation warnings.
1437
+ class _AutoModelWithLMHead(_BaseAutoModelClass):
1438
+ _model_mapping = MODEL_WITH_LM_HEAD_MAPPING
1439
+
1440
+
1441
+ _AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling")
1442
+
1443
+
1444
+ class AutoModelForCausalLM(_BaseAutoModelClass):
1445
+ _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
1446
+
1447
+
1448
+ AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling")
1449
+
1450
+
1451
+ class AutoModelForMaskedLM(_BaseAutoModelClass):
1452
+ _model_mapping = MODEL_FOR_MASKED_LM_MAPPING
1453
+
1454
+
1455
+ AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling")
1456
+
1457
+
1458
+ class AutoModelForSeq2SeqLM(_BaseAutoModelClass):
1459
+ _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
1460
+
1461
+
1462
+ AutoModelForSeq2SeqLM = auto_class_update(
1463
+ AutoModelForSeq2SeqLM,
1464
+ head_doc="sequence-to-sequence language modeling",
1465
+ checkpoint_for_example="google-t5/t5-base",
1466
+ )
1467
+
1468
+
1469
+ class AutoModelForSequenceClassification(_BaseAutoModelClass):
1470
+ _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
1471
+
1472
+
1473
+ AutoModelForSequenceClassification = auto_class_update(
1474
+ AutoModelForSequenceClassification, head_doc="sequence classification"
1475
+ )
1476
+
1477
+
1478
+ class AutoModelForQuestionAnswering(_BaseAutoModelClass):
1479
+ _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
1480
+
1481
+
1482
+ AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering")
1483
+
1484
+
1485
+ class AutoModelForTableQuestionAnswering(_BaseAutoModelClass):
1486
+ _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
1487
+
1488
+
1489
+ AutoModelForTableQuestionAnswering = auto_class_update(
1490
+ AutoModelForTableQuestionAnswering,
1491
+ head_doc="table question answering",
1492
+ checkpoint_for_example="google/tapas-base-finetuned-wtq",
1493
+ )
1494
+
1495
+
1496
+ class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass):
1497
+ _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
1498
+
1499
+
1500
+ AutoModelForVisualQuestionAnswering = auto_class_update(
1501
+ AutoModelForVisualQuestionAnswering,
1502
+ head_doc="visual question answering",
1503
+ checkpoint_for_example="dandelin/vilt-b32-finetuned-vqa",
1504
+ )
1505
+
1506
+
1507
+ class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
1508
+ _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
1509
+
1510
+
1511
+ AutoModelForDocumentQuestionAnswering = auto_class_update(
1512
+ AutoModelForDocumentQuestionAnswering,
1513
+ head_doc="document question answering",
1514
+ checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3',
1515
+ )
1516
+
1517
+
1518
+ class AutoModelForTokenClassification(_BaseAutoModelClass):
1519
+ _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
1520
+
1521
+
1522
+ AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification")
1523
+
1524
+
1525
+ class AutoModelForMultipleChoice(_BaseAutoModelClass):
1526
+ _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING
1527
+
1528
+
1529
+ AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice")
1530
+
1531
+
1532
+ class AutoModelForNextSentencePrediction(_BaseAutoModelClass):
1533
+ _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
1534
+
1535
+
1536
+ AutoModelForNextSentencePrediction = auto_class_update(
1537
+ AutoModelForNextSentencePrediction, head_doc="next sentence prediction"
1538
+ )
1539
+
1540
+
1541
+ class AutoModelForImageClassification(_BaseAutoModelClass):
1542
+ _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
1543
+
1544
+
1545
+ AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc="image classification")
1546
+
1547
+
1548
+ class AutoModelForZeroShotImageClassification(_BaseAutoModelClass):
1549
+ _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
1550
+
1551
+
1552
+ AutoModelForZeroShotImageClassification = auto_class_update(
1553
+ AutoModelForZeroShotImageClassification, head_doc="zero-shot image classification"
1554
+ )
1555
+
1556
+
1557
+ class AutoModelForImageSegmentation(_BaseAutoModelClass):
1558
+ _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
1559
+
1560
+
1561
+ AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc="image segmentation")
1562
+
1563
+
1564
+ class AutoModelForSemanticSegmentation(_BaseAutoModelClass):
1565
+ _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
1566
+
1567
+
1568
+ AutoModelForSemanticSegmentation = auto_class_update(
1569
+ AutoModelForSemanticSegmentation, head_doc="semantic segmentation"
1570
+ )
1571
+
1572
+
1573
+ class AutoModelForUniversalSegmentation(_BaseAutoModelClass):
1574
+ _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING
1575
+
1576
+
1577
+ AutoModelForUniversalSegmentation = auto_class_update(
1578
+ AutoModelForUniversalSegmentation, head_doc="universal image segmentation"
1579
+ )
1580
+
1581
+
1582
+ class AutoModelForInstanceSegmentation(_BaseAutoModelClass):
1583
+ _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING
1584
+
1585
+
1586
+ AutoModelForInstanceSegmentation = auto_class_update(
1587
+ AutoModelForInstanceSegmentation, head_doc="instance segmentation"
1588
+ )
1589
+
1590
+
1591
+ class AutoModelForObjectDetection(_BaseAutoModelClass):
1592
+ _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
1593
+
1594
+
1595
+ AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection")
1596
+
1597
+
1598
+ class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
1599
+ _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
1600
+
1601
+
1602
+ AutoModelForZeroShotObjectDetection = auto_class_update(
1603
+ AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection"
1604
+ )
1605
+
1606
+
1607
+ class AutoModelForDepthEstimation(_BaseAutoModelClass):
1608
+ _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
1609
+
1610
+
1611
+ AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation")
1612
+
1613
+
1614
+ class AutoModelForVideoClassification(_BaseAutoModelClass):
1615
+ _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
1616
+
1617
+
1618
+ AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification")
1619
+
1620
+
1621
+ class AutoModelForVision2Seq(_BaseAutoModelClass):
1622
+ _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
1623
+
1624
+
1625
+ AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling")
1626
+
1627
+
1628
+ class AutoModelForAudioClassification(_BaseAutoModelClass):
1629
+ _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
1630
+
1631
+
1632
+ AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc="audio classification")
1633
+
1634
+
1635
+ class AutoModelForCTC(_BaseAutoModelClass):
1636
+ _model_mapping = MODEL_FOR_CTC_MAPPING
1637
+
1638
+
1639
+ AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc="connectionist temporal classification")
1640
+
1641
+
1642
+ class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
1643
+ _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
1644
+
1645
+
1646
+ AutoModelForSpeechSeq2Seq = auto_class_update(
1647
+ AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
1648
+ )
1649
+
1650
+
1651
+ class AutoModelForAudioFrameClassification(_BaseAutoModelClass):
1652
+ _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING
1653
+
1654
+
1655
+ AutoModelForAudioFrameClassification = auto_class_update(
1656
+ AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification"
1657
+ )
1658
+
1659
+
1660
+ class AutoModelForAudioXVector(_BaseAutoModelClass):
1661
+ _model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING
1662
+
1663
+
1664
+ class AutoModelForTextToSpectrogram(_BaseAutoModelClass):
1665
+ _model_mapping = MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
1666
+
1667
+
1668
+ class AutoModelForTextToWaveform(_BaseAutoModelClass):
1669
+ _model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
1670
+
1671
+
1672
+ class AutoBackbone(_BaseAutoBackboneClass):
1673
+ _model_mapping = MODEL_FOR_BACKBONE_MAPPING
1674
+
1675
+
1676
+ AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc="audio retrieval via x-vector")
1677
+
1678
+
1679
+ class AutoModelForMaskedImageModeling(_BaseAutoModelClass):
1680
+ _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
1681
+
1682
+
1683
+ AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling")
1684
+
1685
+
1686
+ class AutoModelWithLMHead(_AutoModelWithLMHead):
1687
+ @classmethod
1688
+ def from_config(cls, config):
1689
+ warnings.warn(
1690
+ "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
1691
+ "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
1692
+ "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
1693
+ FutureWarning,
1694
+ )
1695
+ return super().from_config(config)
1696
+
1697
+ @classmethod
1698
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
1699
+ warnings.warn(
1700
+ "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
1701
+ "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
1702
+ "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
1703
+ FutureWarning,
1704
+ )
1705
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
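A short usage sketch of the auto classes defined in this file (illustrative only; `gpt2` and `google-t5/t5-base` are public Hub checkpoints used here purely as examples, the latter being the `checkpoint_for_example` referenced in the docstring updates above):

# Sketch only: each Auto* class dispatches from_pretrained/from_config through its _model_mapping.
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM

# Resolves the checkpoint's model_type ("gpt2") through MODEL_FOR_CAUSAL_LM_MAPPING -> GPT2LMHeadModel.
causal_lm = AutoModelForCausalLM.from_pretrained("gpt2")

# Same pattern for encoder-decoder models via MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.
seq2seq_lm = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")

# from_config builds a randomly initialized model of the mapped class, without downloading weights.
config = AutoConfig.from_pretrained("gpt2")
fresh_model = AutoModelForCausalLM.from_config(config)

# The deprecated AutoModelWithLMHead above still works but emits the FutureWarning; prefer
# AutoModelForCausalLM / AutoModelForMaskedLM / AutoModelForSeq2SeqLM as the warning says.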
venv/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py ADDED
@@ -0,0 +1,382 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Model class."""
16
+
17
+
18
+ from collections import OrderedDict
19
+
20
+ from ...utils import logging
21
+ from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
22
+ from .configuration_auto import CONFIG_MAPPING_NAMES
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ FLAX_MODEL_MAPPING_NAMES = OrderedDict(
29
+ [
30
+ # Base model mapping
31
+ ("albert", "FlaxAlbertModel"),
32
+ ("bart", "FlaxBartModel"),
33
+ ("beit", "FlaxBeitModel"),
34
+ ("bert", "FlaxBertModel"),
35
+ ("big_bird", "FlaxBigBirdModel"),
36
+ ("blenderbot", "FlaxBlenderbotModel"),
37
+ ("blenderbot-small", "FlaxBlenderbotSmallModel"),
38
+ ("bloom", "FlaxBloomModel"),
39
+ ("clip", "FlaxCLIPModel"),
40
+ ("distilbert", "FlaxDistilBertModel"),
41
+ ("electra", "FlaxElectraModel"),
42
+ ("gemma", "FlaxGemmaModel"),
43
+ ("gpt-sw3", "FlaxGPT2Model"),
44
+ ("gpt2", "FlaxGPT2Model"),
45
+ ("gpt_neo", "FlaxGPTNeoModel"),
46
+ ("gptj", "FlaxGPTJModel"),
47
+ ("llama", "FlaxLlamaModel"),
48
+ ("longt5", "FlaxLongT5Model"),
49
+ ("marian", "FlaxMarianModel"),
50
+ ("mbart", "FlaxMBartModel"),
51
+ ("mistral", "FlaxMistralModel"),
52
+ ("mt5", "FlaxMT5Model"),
53
+ ("opt", "FlaxOPTModel"),
54
+ ("pegasus", "FlaxPegasusModel"),
55
+ ("regnet", "FlaxRegNetModel"),
56
+ ("resnet", "FlaxResNetModel"),
57
+ ("roberta", "FlaxRobertaModel"),
58
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
59
+ ("roformer", "FlaxRoFormerModel"),
60
+ ("t5", "FlaxT5Model"),
61
+ ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
62
+ ("vit", "FlaxViTModel"),
63
+ ("wav2vec2", "FlaxWav2Vec2Model"),
64
+ ("whisper", "FlaxWhisperModel"),
65
+ ("xglm", "FlaxXGLMModel"),
66
+ ("xlm-roberta", "FlaxXLMRobertaModel"),
67
+ ]
68
+ )
69
+
70
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
71
+ [
72
+ # Model for pre-training mapping
73
+ ("albert", "FlaxAlbertForPreTraining"),
74
+ ("bart", "FlaxBartForConditionalGeneration"),
75
+ ("bert", "FlaxBertForPreTraining"),
76
+ ("big_bird", "FlaxBigBirdForPreTraining"),
77
+ ("electra", "FlaxElectraForPreTraining"),
78
+ ("longt5", "FlaxLongT5ForConditionalGeneration"),
79
+ ("mbart", "FlaxMBartForConditionalGeneration"),
80
+ ("mt5", "FlaxMT5ForConditionalGeneration"),
81
+ ("roberta", "FlaxRobertaForMaskedLM"),
82
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
83
+ ("roformer", "FlaxRoFormerForMaskedLM"),
84
+ ("t5", "FlaxT5ForConditionalGeneration"),
85
+ ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
86
+ ("whisper", "FlaxWhisperForConditionalGeneration"),
87
+ ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
88
+ ]
89
+ )
90
+
91
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
92
+ [
93
+ # Model for Masked LM mapping
94
+ ("albert", "FlaxAlbertForMaskedLM"),
95
+ ("bart", "FlaxBartForConditionalGeneration"),
96
+ ("bert", "FlaxBertForMaskedLM"),
97
+ ("big_bird", "FlaxBigBirdForMaskedLM"),
98
+ ("distilbert", "FlaxDistilBertForMaskedLM"),
99
+ ("electra", "FlaxElectraForMaskedLM"),
100
+ ("mbart", "FlaxMBartForConditionalGeneration"),
101
+ ("roberta", "FlaxRobertaForMaskedLM"),
102
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
103
+ ("roformer", "FlaxRoFormerForMaskedLM"),
104
+ ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
105
+ ]
106
+ )
107
+
108
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
109
+ [
110
+ # Model for Seq2Seq Causal LM mapping
111
+ ("bart", "FlaxBartForConditionalGeneration"),
112
+ ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
113
+ ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
114
+ ("encoder-decoder", "FlaxEncoderDecoderModel"),
115
+ ("longt5", "FlaxLongT5ForConditionalGeneration"),
116
+ ("marian", "FlaxMarianMTModel"),
117
+ ("mbart", "FlaxMBartForConditionalGeneration"),
118
+ ("mt5", "FlaxMT5ForConditionalGeneration"),
119
+ ("pegasus", "FlaxPegasusForConditionalGeneration"),
120
+ ("t5", "FlaxT5ForConditionalGeneration"),
121
+ ]
122
+ )
123
+
124
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
125
+ [
126
+ # Model for Image Classification mapping
127
+ ("beit", "FlaxBeitForImageClassification"),
128
+ ("regnet", "FlaxRegNetForImageClassification"),
129
+ ("resnet", "FlaxResNetForImageClassification"),
130
+ ("vit", "FlaxViTForImageClassification"),
131
+ ]
132
+ )
133
+
134
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
135
+ [
136
+ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
137
+ ]
138
+ )
139
+
140
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
141
+ [
142
+ # Model for Causal LM mapping
143
+ ("bart", "FlaxBartForCausalLM"),
144
+ ("bert", "FlaxBertForCausalLM"),
145
+ ("big_bird", "FlaxBigBirdForCausalLM"),
146
+ ("bloom", "FlaxBloomForCausalLM"),
147
+ ("electra", "FlaxElectraForCausalLM"),
148
+ ("gemma", "FlaxGemmaForCausalLM"),
149
+ ("gpt-sw3", "FlaxGPT2LMHeadModel"),
150
+ ("gpt2", "FlaxGPT2LMHeadModel"),
151
+ ("gpt_neo", "FlaxGPTNeoForCausalLM"),
152
+ ("gptj", "FlaxGPTJForCausalLM"),
153
+ ("llama", "FlaxLlamaForCausalLM"),
154
+ ("mistral", "FlaxMistralForCausalLM"),
155
+ ("opt", "FlaxOPTForCausalLM"),
156
+ ("roberta", "FlaxRobertaForCausalLM"),
157
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
158
+ ("xglm", "FlaxXGLMForCausalLM"),
159
+ ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
160
+ ]
161
+ )
162
+
163
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
164
+ [
165
+ # Model for Sequence Classification mapping
166
+ ("albert", "FlaxAlbertForSequenceClassification"),
167
+ ("bart", "FlaxBartForSequenceClassification"),
168
+ ("bert", "FlaxBertForSequenceClassification"),
169
+ ("big_bird", "FlaxBigBirdForSequenceClassification"),
170
+ ("distilbert", "FlaxDistilBertForSequenceClassification"),
171
+ ("electra", "FlaxElectraForSequenceClassification"),
172
+ ("mbart", "FlaxMBartForSequenceClassification"),
173
+ ("roberta", "FlaxRobertaForSequenceClassification"),
174
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
175
+ ("roformer", "FlaxRoFormerForSequenceClassification"),
176
+ ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
177
+ ]
178
+ )
179
+
180
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
181
+ [
182
+ # Model for Question Answering mapping
183
+ ("albert", "FlaxAlbertForQuestionAnswering"),
184
+ ("bart", "FlaxBartForQuestionAnswering"),
185
+ ("bert", "FlaxBertForQuestionAnswering"),
186
+ ("big_bird", "FlaxBigBirdForQuestionAnswering"),
187
+ ("distilbert", "FlaxDistilBertForQuestionAnswering"),
188
+ ("electra", "FlaxElectraForQuestionAnswering"),
189
+ ("mbart", "FlaxMBartForQuestionAnswering"),
190
+ ("roberta", "FlaxRobertaForQuestionAnswering"),
191
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
192
+ ("roformer", "FlaxRoFormerForQuestionAnswering"),
193
+ ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
194
+ ]
195
+ )
196
+
197
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
198
+ [
199
+ # Model for Token Classification mapping
200
+ ("albert", "FlaxAlbertForTokenClassification"),
201
+ ("bert", "FlaxBertForTokenClassification"),
202
+ ("big_bird", "FlaxBigBirdForTokenClassification"),
203
+ ("distilbert", "FlaxDistilBertForTokenClassification"),
204
+ ("electra", "FlaxElectraForTokenClassification"),
205
+ ("roberta", "FlaxRobertaForTokenClassification"),
206
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
207
+ ("roformer", "FlaxRoFormerForTokenClassification"),
208
+ ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
209
+ ]
210
+ )
211
+
212
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
213
+ [
214
+ # Model for Multiple Choice mapping
215
+ ("albert", "FlaxAlbertForMultipleChoice"),
216
+ ("bert", "FlaxBertForMultipleChoice"),
217
+ ("big_bird", "FlaxBigBirdForMultipleChoice"),
218
+ ("distilbert", "FlaxDistilBertForMultipleChoice"),
219
+ ("electra", "FlaxElectraForMultipleChoice"),
220
+ ("roberta", "FlaxRobertaForMultipleChoice"),
221
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
222
+ ("roformer", "FlaxRoFormerForMultipleChoice"),
223
+ ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
224
+ ]
225
+ )
226
+
227
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
228
+ [
229
+ ("bert", "FlaxBertForNextSentencePrediction"),
230
+ ]
231
+ )
232
+
233
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
234
+ [
235
+ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
236
+ ("whisper", "FlaxWhisperForConditionalGeneration"),
237
+ ]
238
+ )
239
+
240
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
241
+ [
242
+ ("whisper", "FlaxWhisperForAudioClassification"),
243
+ ]
244
+ )
245
+
246
+ FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
247
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
248
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
249
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
250
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
251
+ )
252
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
253
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
254
+ )
255
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
256
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
257
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
258
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
259
+ )
260
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
261
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
262
+ )
263
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
264
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
265
+ )
266
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
267
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
268
+ )
269
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
270
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
271
+ )
272
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
273
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
274
+ )
275
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
276
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
277
+ )
278
+
279
+
280
+ class FlaxAutoModel(_BaseAutoModelClass):
281
+ _model_mapping = FLAX_MODEL_MAPPING
282
+
283
+
284
+ FlaxAutoModel = auto_class_update(FlaxAutoModel)
285
+
286
+
287
+ class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
288
+ _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
289
+
290
+
291
+ FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
292
+
293
+
294
+ class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
295
+ _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
296
+
297
+
298
+ FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
299
+
300
+
301
+ class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
302
+ _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
303
+
304
+
305
+ FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
306
+
307
+
308
+ class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
309
+ _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
310
+
311
+
312
+ FlaxAutoModelForSeq2SeqLM = auto_class_update(
313
+ FlaxAutoModelForSeq2SeqLM,
314
+ head_doc="sequence-to-sequence language modeling",
315
+ checkpoint_for_example="google-t5/t5-base",
316
+ )
317
+
318
+
319
+ class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
320
+ _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
321
+
322
+
323
+ FlaxAutoModelForSequenceClassification = auto_class_update(
324
+ FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
325
+ )
326
+
327
+
328
+ class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
329
+ _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
330
+
331
+
332
+ FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
333
+
334
+
335
+ class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
336
+ _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
337
+
338
+
339
+ FlaxAutoModelForTokenClassification = auto_class_update(
340
+ FlaxAutoModelForTokenClassification, head_doc="token classification"
341
+ )
342
+
343
+
344
+ class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
345
+ _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
346
+
347
+
348
+ FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
349
+
350
+
351
+ class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
352
+ _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
353
+
354
+
355
+ FlaxAutoModelForNextSentencePrediction = auto_class_update(
356
+ FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
357
+ )
358
+
359
+
360
+ class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
361
+ _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
362
+
363
+
364
+ FlaxAutoModelForImageClassification = auto_class_update(
365
+ FlaxAutoModelForImageClassification, head_doc="image classification"
366
+ )
367
+
368
+
369
+ class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
370
+ _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
371
+
372
+
373
+ FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
374
+
375
+
376
+ class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
377
+ _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
378
+
379
+
380
+ FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
381
+ FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
382
+ )
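The Flax auto classes mirror the PyTorch ones but resolve to `Flax*` implementations through the `FLAX_MODEL_*` mappings above. A minimal sketch, assuming `jax`/`flax` are installed and using `bert-base-uncased` as an arbitrary example checkpoint:

# Sketch only: FlaxAutoModel resolves "bert" through FLAX_MODEL_MAPPING to FlaxBertModel.
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModel.from_pretrained("bert-base-uncased")

# Flax models take NumPy/JAX arrays, hence return_tensors="np".
inputs = tokenizer("Hello, world!", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)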
venv/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py ADDED
@@ -0,0 +1,721 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Model class."""
16
+
17
+
18
+ import warnings
19
+ from collections import OrderedDict
20
+
21
+ from ...utils import logging
22
+ from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
23
+ from .configuration_auto import CONFIG_MAPPING_NAMES
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ TF_MODEL_MAPPING_NAMES = OrderedDict(
30
+ [
31
+ # Base model mapping
32
+ ("albert", "TFAlbertModel"),
33
+ ("bart", "TFBartModel"),
34
+ ("bert", "TFBertModel"),
35
+ ("blenderbot", "TFBlenderbotModel"),
36
+ ("blenderbot-small", "TFBlenderbotSmallModel"),
37
+ ("blip", "TFBlipModel"),
38
+ ("camembert", "TFCamembertModel"),
39
+ ("clip", "TFCLIPModel"),
40
+ ("convbert", "TFConvBertModel"),
41
+ ("convnext", "TFConvNextModel"),
42
+ ("convnextv2", "TFConvNextV2Model"),
43
+ ("ctrl", "TFCTRLModel"),
44
+ ("cvt", "TFCvtModel"),
45
+ ("data2vec-vision", "TFData2VecVisionModel"),
46
+ ("deberta", "TFDebertaModel"),
47
+ ("deberta-v2", "TFDebertaV2Model"),
48
+ ("deit", "TFDeiTModel"),
49
+ ("distilbert", "TFDistilBertModel"),
50
+ ("dpr", "TFDPRQuestionEncoder"),
51
+ ("efficientformer", "TFEfficientFormerModel"),
52
+ ("electra", "TFElectraModel"),
53
+ ("esm", "TFEsmModel"),
54
+ ("flaubert", "TFFlaubertModel"),
55
+ ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")),
56
+ ("gpt-sw3", "TFGPT2Model"),
57
+ ("gpt2", "TFGPT2Model"),
58
+ ("gptj", "TFGPTJModel"),
59
+ ("groupvit", "TFGroupViTModel"),
60
+ ("hubert", "TFHubertModel"),
61
+ ("layoutlm", "TFLayoutLMModel"),
62
+ ("layoutlmv3", "TFLayoutLMv3Model"),
63
+ ("led", "TFLEDModel"),
64
+ ("longformer", "TFLongformerModel"),
65
+ ("lxmert", "TFLxmertModel"),
66
+ ("marian", "TFMarianModel"),
67
+ ("mbart", "TFMBartModel"),
68
+ ("mobilebert", "TFMobileBertModel"),
69
+ ("mobilevit", "TFMobileViTModel"),
70
+ ("mpnet", "TFMPNetModel"),
71
+ ("mt5", "TFMT5Model"),
72
+ ("openai-gpt", "TFOpenAIGPTModel"),
73
+ ("opt", "TFOPTModel"),
74
+ ("pegasus", "TFPegasusModel"),
75
+ ("regnet", "TFRegNetModel"),
76
+ ("rembert", "TFRemBertModel"),
77
+ ("resnet", "TFResNetModel"),
78
+ ("roberta", "TFRobertaModel"),
79
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"),
80
+ ("roformer", "TFRoFormerModel"),
81
+ ("sam", "TFSamModel"),
82
+ ("segformer", "TFSegformerModel"),
83
+ ("speech_to_text", "TFSpeech2TextModel"),
84
+ ("swin", "TFSwinModel"),
85
+ ("t5", "TFT5Model"),
86
+ ("tapas", "TFTapasModel"),
87
+ ("transfo-xl", "TFTransfoXLModel"),
88
+ ("vision-text-dual-encoder", "TFVisionTextDualEncoderModel"),
89
+ ("vit", "TFViTModel"),
90
+ ("vit_mae", "TFViTMAEModel"),
91
+ ("wav2vec2", "TFWav2Vec2Model"),
92
+ ("whisper", "TFWhisperModel"),
93
+ ("xglm", "TFXGLMModel"),
94
+ ("xlm", "TFXLMModel"),
95
+ ("xlm-roberta", "TFXLMRobertaModel"),
96
+ ("xlnet", "TFXLNetModel"),
97
+ ]
98
+ )
99
+
100
+ TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
101
+ [
102
+ # Model for pre-training mapping
103
+ ("albert", "TFAlbertForPreTraining"),
104
+ ("bart", "TFBartForConditionalGeneration"),
105
+ ("bert", "TFBertForPreTraining"),
106
+ ("camembert", "TFCamembertForMaskedLM"),
107
+ ("ctrl", "TFCTRLLMHeadModel"),
108
+ ("distilbert", "TFDistilBertForMaskedLM"),
109
+ ("electra", "TFElectraForPreTraining"),
110
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
111
+ ("funnel", "TFFunnelForPreTraining"),
112
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
113
+ ("gpt2", "TFGPT2LMHeadModel"),
114
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
115
+ ("lxmert", "TFLxmertForPreTraining"),
116
+ ("mobilebert", "TFMobileBertForPreTraining"),
117
+ ("mpnet", "TFMPNetForMaskedLM"),
118
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
119
+ ("roberta", "TFRobertaForMaskedLM"),
120
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
121
+ ("t5", "TFT5ForConditionalGeneration"),
122
+ ("tapas", "TFTapasForMaskedLM"),
123
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
124
+ ("vit_mae", "TFViTMAEForPreTraining"),
125
+ ("xlm", "TFXLMWithLMHeadModel"),
126
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
127
+ ("xlnet", "TFXLNetLMHeadModel"),
128
+ ]
129
+ )
130
+
131
+ TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
132
+ [
133
+ # Model with LM heads mapping
134
+ ("albert", "TFAlbertForMaskedLM"),
135
+ ("bart", "TFBartForConditionalGeneration"),
136
+ ("bert", "TFBertForMaskedLM"),
137
+ ("camembert", "TFCamembertForMaskedLM"),
138
+ ("convbert", "TFConvBertForMaskedLM"),
139
+ ("ctrl", "TFCTRLLMHeadModel"),
140
+ ("distilbert", "TFDistilBertForMaskedLM"),
141
+ ("electra", "TFElectraForMaskedLM"),
142
+ ("esm", "TFEsmForMaskedLM"),
143
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
144
+ ("funnel", "TFFunnelForMaskedLM"),
145
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
146
+ ("gpt2", "TFGPT2LMHeadModel"),
147
+ ("gptj", "TFGPTJForCausalLM"),
148
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
149
+ ("led", "TFLEDForConditionalGeneration"),
150
+ ("longformer", "TFLongformerForMaskedLM"),
151
+ ("marian", "TFMarianMTModel"),
152
+ ("mobilebert", "TFMobileBertForMaskedLM"),
153
+ ("mpnet", "TFMPNetForMaskedLM"),
154
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
155
+ ("rembert", "TFRemBertForMaskedLM"),
156
+ ("roberta", "TFRobertaForMaskedLM"),
157
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
158
+ ("roformer", "TFRoFormerForMaskedLM"),
159
+ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
160
+ ("t5", "TFT5ForConditionalGeneration"),
161
+ ("tapas", "TFTapasForMaskedLM"),
162
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
163
+ ("whisper", "TFWhisperForConditionalGeneration"),
164
+ ("xlm", "TFXLMWithLMHeadModel"),
165
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
166
+ ("xlnet", "TFXLNetLMHeadModel"),
167
+ ]
168
+ )
169
+
170
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
171
+ [
172
+ # Model for Causal LM mapping
173
+ ("bert", "TFBertLMHeadModel"),
174
+ ("camembert", "TFCamembertForCausalLM"),
175
+ ("ctrl", "TFCTRLLMHeadModel"),
176
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
177
+ ("gpt2", "TFGPT2LMHeadModel"),
178
+ ("gptj", "TFGPTJForCausalLM"),
179
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
180
+ ("opt", "TFOPTForCausalLM"),
181
+ ("rembert", "TFRemBertForCausalLM"),
182
+ ("roberta", "TFRobertaForCausalLM"),
183
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForCausalLM"),
184
+ ("roformer", "TFRoFormerForCausalLM"),
185
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
186
+ ("xglm", "TFXGLMForCausalLM"),
187
+ ("xlm", "TFXLMWithLMHeadModel"),
188
+ ("xlm-roberta", "TFXLMRobertaForCausalLM"),
189
+ ("xlnet", "TFXLNetLMHeadModel"),
190
+ ]
191
+ )
192
+
193
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
194
+ [
195
+ ("deit", "TFDeiTForMaskedImageModeling"),
196
+ ("swin", "TFSwinForMaskedImageModeling"),
197
+ ]
198
+ )
199
+
200
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
201
+ [
202
+ # Model for Image Classification mapping
203
+ ("convnext", "TFConvNextForImageClassification"),
204
+ ("convnextv2", "TFConvNextV2ForImageClassification"),
205
+ ("cvt", "TFCvtForImageClassification"),
206
+ ("data2vec-vision", "TFData2VecVisionForImageClassification"),
207
+ ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")),
208
+ (
209
+ "efficientformer",
210
+ ("TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher"),
211
+ ),
212
+ ("mobilevit", "TFMobileViTForImageClassification"),
213
+ ("regnet", "TFRegNetForImageClassification"),
214
+ ("resnet", "TFResNetForImageClassification"),
215
+ ("segformer", "TFSegformerForImageClassification"),
216
+ ("swin", "TFSwinForImageClassification"),
217
+ ("vit", "TFViTForImageClassification"),
218
+ ]
219
+ )
220
+
221
+
222
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
223
+ [
224
+ # Model for Zero Shot Image Classification mapping
225
+ ("blip", "TFBlipModel"),
226
+ ("clip", "TFCLIPModel"),
227
+ ]
228
+ )
229
+
230
+
231
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict(
232
+ [
233
+ # Model for Semantic Segmentation mapping
234
+ ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"),
235
+ ("mobilevit", "TFMobileViTForSemanticSegmentation"),
236
+ ("segformer", "TFSegformerForSemanticSegmentation"),
237
+ ]
238
+ )
239
+
240
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
241
+ [
242
+ ("blip", "TFBlipForConditionalGeneration"),
243
+ ("vision-encoder-decoder", "TFVisionEncoderDecoderModel"),
244
+ ]
245
+ )
246
+
247
+ TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
248
+ [
249
+ # Model for Masked LM mapping
250
+ ("albert", "TFAlbertForMaskedLM"),
251
+ ("bert", "TFBertForMaskedLM"),
252
+ ("camembert", "TFCamembertForMaskedLM"),
253
+ ("convbert", "TFConvBertForMaskedLM"),
254
+ ("deberta", "TFDebertaForMaskedLM"),
255
+ ("deberta-v2", "TFDebertaV2ForMaskedLM"),
256
+ ("distilbert", "TFDistilBertForMaskedLM"),
257
+ ("electra", "TFElectraForMaskedLM"),
258
+ ("esm", "TFEsmForMaskedLM"),
259
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
260
+ ("funnel", "TFFunnelForMaskedLM"),
261
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
262
+ ("longformer", "TFLongformerForMaskedLM"),
263
+ ("mobilebert", "TFMobileBertForMaskedLM"),
264
+ ("mpnet", "TFMPNetForMaskedLM"),
265
+ ("rembert", "TFRemBertForMaskedLM"),
266
+ ("roberta", "TFRobertaForMaskedLM"),
267
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
268
+ ("roformer", "TFRoFormerForMaskedLM"),
269
+ ("tapas", "TFTapasForMaskedLM"),
270
+ ("xlm", "TFXLMWithLMHeadModel"),
271
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
272
+ ]
273
+ )
274
+
275
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
276
+ [
277
+ # Model for Seq2Seq Causal LM mapping
278
+ ("bart", "TFBartForConditionalGeneration"),
279
+ ("blenderbot", "TFBlenderbotForConditionalGeneration"),
280
+ ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"),
281
+ ("encoder-decoder", "TFEncoderDecoderModel"),
282
+ ("led", "TFLEDForConditionalGeneration"),
283
+ ("marian", "TFMarianMTModel"),
284
+ ("mbart", "TFMBartForConditionalGeneration"),
285
+ ("mt5", "TFMT5ForConditionalGeneration"),
286
+ ("pegasus", "TFPegasusForConditionalGeneration"),
287
+ ("t5", "TFT5ForConditionalGeneration"),
288
+ ]
289
+ )
290
+
291
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
292
+ [
293
+ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
294
+ ("whisper", "TFWhisperForConditionalGeneration"),
295
+ ]
296
+ )
297
+
298
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
299
+ [
300
+ # Model for Sequence Classification mapping
301
+ ("albert", "TFAlbertForSequenceClassification"),
302
+ ("bart", "TFBartForSequenceClassification"),
303
+ ("bert", "TFBertForSequenceClassification"),
304
+ ("camembert", "TFCamembertForSequenceClassification"),
305
+ ("convbert", "TFConvBertForSequenceClassification"),
306
+ ("ctrl", "TFCTRLForSequenceClassification"),
307
+ ("deberta", "TFDebertaForSequenceClassification"),
308
+ ("deberta-v2", "TFDebertaV2ForSequenceClassification"),
309
+ ("distilbert", "TFDistilBertForSequenceClassification"),
310
+ ("electra", "TFElectraForSequenceClassification"),
311
+ ("esm", "TFEsmForSequenceClassification"),
312
+ ("flaubert", "TFFlaubertForSequenceClassification"),
313
+ ("funnel", "TFFunnelForSequenceClassification"),
314
+ ("gpt-sw3", "TFGPT2ForSequenceClassification"),
315
+ ("gpt2", "TFGPT2ForSequenceClassification"),
316
+ ("gptj", "TFGPTJForSequenceClassification"),
317
+ ("layoutlm", "TFLayoutLMForSequenceClassification"),
318
+ ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"),
319
+ ("longformer", "TFLongformerForSequenceClassification"),
320
+ ("mobilebert", "TFMobileBertForSequenceClassification"),
321
+ ("mpnet", "TFMPNetForSequenceClassification"),
322
+ ("openai-gpt", "TFOpenAIGPTForSequenceClassification"),
323
+ ("rembert", "TFRemBertForSequenceClassification"),
324
+ ("roberta", "TFRobertaForSequenceClassification"),
325
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForSequenceClassification"),
326
+ ("roformer", "TFRoFormerForSequenceClassification"),
327
+ ("tapas", "TFTapasForSequenceClassification"),
328
+ ("transfo-xl", "TFTransfoXLForSequenceClassification"),
329
+ ("xlm", "TFXLMForSequenceClassification"),
330
+ ("xlm-roberta", "TFXLMRobertaForSequenceClassification"),
331
+ ("xlnet", "TFXLNetForSequenceClassification"),
332
+ ]
333
+ )
334
+
335
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
336
+ [
337
+ # Model for Question Answering mapping
338
+ ("albert", "TFAlbertForQuestionAnswering"),
339
+ ("bert", "TFBertForQuestionAnswering"),
340
+ ("camembert", "TFCamembertForQuestionAnswering"),
341
+ ("convbert", "TFConvBertForQuestionAnswering"),
342
+ ("deberta", "TFDebertaForQuestionAnswering"),
343
+ ("deberta-v2", "TFDebertaV2ForQuestionAnswering"),
344
+ ("distilbert", "TFDistilBertForQuestionAnswering"),
345
+ ("electra", "TFElectraForQuestionAnswering"),
346
+ ("flaubert", "TFFlaubertForQuestionAnsweringSimple"),
347
+ ("funnel", "TFFunnelForQuestionAnswering"),
348
+ ("gptj", "TFGPTJForQuestionAnswering"),
349
+ ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"),
350
+ ("longformer", "TFLongformerForQuestionAnswering"),
351
+ ("mobilebert", "TFMobileBertForQuestionAnswering"),
352
+ ("mpnet", "TFMPNetForQuestionAnswering"),
353
+ ("rembert", "TFRemBertForQuestionAnswering"),
354
+ ("roberta", "TFRobertaForQuestionAnswering"),
355
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForQuestionAnswering"),
356
+ ("roformer", "TFRoFormerForQuestionAnswering"),
357
+ ("xlm", "TFXLMForQuestionAnsweringSimple"),
358
+ ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"),
359
+ ("xlnet", "TFXLNetForQuestionAnsweringSimple"),
360
+ ]
361
+ )
362
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([("wav2vec2", "TFWav2Vec2ForSequenceClassification")])
363
+
364
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
365
+ [
366
+ ("layoutlm", "TFLayoutLMForQuestionAnswering"),
367
+ ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"),
368
+ ]
369
+ )
370
+
371
+
372
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
373
+ [
374
+ # Model for Table Question Answering mapping
375
+ ("tapas", "TFTapasForQuestionAnswering"),
376
+ ]
377
+ )
378
+
379
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
380
+ [
381
+ # Model for Token Classification mapping
382
+ ("albert", "TFAlbertForTokenClassification"),
383
+ ("bert", "TFBertForTokenClassification"),
384
+ ("camembert", "TFCamembertForTokenClassification"),
385
+ ("convbert", "TFConvBertForTokenClassification"),
386
+ ("deberta", "TFDebertaForTokenClassification"),
387
+ ("deberta-v2", "TFDebertaV2ForTokenClassification"),
388
+ ("distilbert", "TFDistilBertForTokenClassification"),
389
+ ("electra", "TFElectraForTokenClassification"),
390
+ ("esm", "TFEsmForTokenClassification"),
391
+ ("flaubert", "TFFlaubertForTokenClassification"),
392
+ ("funnel", "TFFunnelForTokenClassification"),
393
+ ("layoutlm", "TFLayoutLMForTokenClassification"),
394
+ ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"),
395
+ ("longformer", "TFLongformerForTokenClassification"),
396
+ ("mobilebert", "TFMobileBertForTokenClassification"),
397
+ ("mpnet", "TFMPNetForTokenClassification"),
398
+ ("rembert", "TFRemBertForTokenClassification"),
399
+ ("roberta", "TFRobertaForTokenClassification"),
400
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForTokenClassification"),
401
+ ("roformer", "TFRoFormerForTokenClassification"),
402
+ ("xlm", "TFXLMForTokenClassification"),
403
+ ("xlm-roberta", "TFXLMRobertaForTokenClassification"),
404
+ ("xlnet", "TFXLNetForTokenClassification"),
405
+ ]
406
+ )
407
+
408
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
409
+ [
410
+ # Model for Multiple Choice mapping
411
+ ("albert", "TFAlbertForMultipleChoice"),
412
+ ("bert", "TFBertForMultipleChoice"),
413
+ ("camembert", "TFCamembertForMultipleChoice"),
414
+ ("convbert", "TFConvBertForMultipleChoice"),
415
+ ("deberta-v2", "TFDebertaV2ForMultipleChoice"),
416
+ ("distilbert", "TFDistilBertForMultipleChoice"),
417
+ ("electra", "TFElectraForMultipleChoice"),
418
+ ("flaubert", "TFFlaubertForMultipleChoice"),
419
+ ("funnel", "TFFunnelForMultipleChoice"),
420
+ ("longformer", "TFLongformerForMultipleChoice"),
421
+ ("mobilebert", "TFMobileBertForMultipleChoice"),
422
+ ("mpnet", "TFMPNetForMultipleChoice"),
423
+ ("rembert", "TFRemBertForMultipleChoice"),
424
+ ("roberta", "TFRobertaForMultipleChoice"),
425
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMultipleChoice"),
426
+ ("roformer", "TFRoFormerForMultipleChoice"),
427
+ ("xlm", "TFXLMForMultipleChoice"),
428
+ ("xlm-roberta", "TFXLMRobertaForMultipleChoice"),
429
+ ("xlnet", "TFXLNetForMultipleChoice"),
430
+ ]
431
+ )
432
+
433
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
434
+ [
435
+ ("bert", "TFBertForNextSentencePrediction"),
436
+ ("mobilebert", "TFMobileBertForNextSentencePrediction"),
437
+ ]
438
+ )
439
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict(
440
+ [
441
+ ("sam", "TFSamModel"),
442
+ ]
443
+ )
444
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict(
445
+ [
446
+ ("albert", "TFAlbertModel"),
447
+ ("bert", "TFBertModel"),
448
+ ("convbert", "TFConvBertModel"),
449
+ ("deberta", "TFDebertaModel"),
450
+ ("deberta-v2", "TFDebertaV2Model"),
451
+ ("distilbert", "TFDistilBertModel"),
452
+ ("electra", "TFElectraModel"),
453
+ ("flaubert", "TFFlaubertModel"),
454
+ ("longformer", "TFLongformerModel"),
455
+ ("mobilebert", "TFMobileBertModel"),
456
+ ("mt5", "TFMT5EncoderModel"),
457
+ ("rembert", "TFRemBertModel"),
458
+ ("roberta", "TFRobertaModel"),
459
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"),
460
+ ("roformer", "TFRoFormerModel"),
461
+ ("t5", "TFT5EncoderModel"),
462
+ ("xlm", "TFXLMModel"),
463
+ ("xlm-roberta", "TFXLMRobertaModel"),
464
+ ]
465
+ )
466
+
467
+ TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES)
468
+ TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
469
+ TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES)
470
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
471
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
472
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
473
+ )
474
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
475
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
476
+ )
477
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
478
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
479
+ )
480
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(
481
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES
482
+ )
483
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
484
+ TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
485
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
486
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
487
+ )
488
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
489
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
490
+ )
491
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
492
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
493
+ )
494
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
495
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
496
+ )
497
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
498
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
499
+ )
500
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
501
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
502
+ )
503
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
504
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
505
+ )
506
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
507
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
508
+ )
509
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
510
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
511
+ )
512
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
513
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
514
+ )
515
+
516
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(
517
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES
518
+ )
519
+
520
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES)
521
+
522
+
523
+ class TFAutoModelForMaskGeneration(_BaseAutoModelClass):
524
+ _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING
525
+
526
+
527
+ class TFAutoModelForTextEncoding(_BaseAutoModelClass):
528
+ _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING
529
+
530
+
531
+ class TFAutoModel(_BaseAutoModelClass):
532
+ _model_mapping = TF_MODEL_MAPPING
533
+
534
+
535
+ TFAutoModel = auto_class_update(TFAutoModel)
536
+
537
+
538
+ class TFAutoModelForAudioClassification(_BaseAutoModelClass):
539
+ _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
540
+
541
+
542
+ TFAutoModelForAudioClassification = auto_class_update(
543
+ TFAutoModelForAudioClassification, head_doc="audio classification"
544
+ )
545
+
546
+
547
+ class TFAutoModelForPreTraining(_BaseAutoModelClass):
548
+ _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING
549
+
550
+
551
+ TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining")
552
+
553
+
554
+ # Private on purpose, the public class will add the deprecation warnings.
555
+ class _TFAutoModelWithLMHead(_BaseAutoModelClass):
556
+ _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING
557
+
558
+
559
+ _TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling")
560
+
561
+
562
+ class TFAutoModelForCausalLM(_BaseAutoModelClass):
563
+ _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
564
+
565
+
566
+ TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling")
567
+
568
+
569
+ class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass):
570
+ _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
571
+
572
+
573
+ TFAutoModelForMaskedImageModeling = auto_class_update(
574
+ TFAutoModelForMaskedImageModeling, head_doc="masked image modeling"
575
+ )
576
+
577
+
578
+ class TFAutoModelForImageClassification(_BaseAutoModelClass):
579
+ _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
580
+
581
+
582
+ TFAutoModelForImageClassification = auto_class_update(
583
+ TFAutoModelForImageClassification, head_doc="image classification"
584
+ )
585
+
586
+
587
+ class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass):
588
+ _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
589
+
590
+
591
+ TFAutoModelForZeroShotImageClassification = auto_class_update(
592
+ TFAutoModelForZeroShotImageClassification, head_doc="zero-shot image classification"
593
+ )
594
+
595
+
596
+ class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass):
597
+ _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
598
+
599
+
600
+ TFAutoModelForSemanticSegmentation = auto_class_update(
601
+ TFAutoModelForSemanticSegmentation, head_doc="semantic segmentation"
602
+ )
603
+
604
+
605
+ class TFAutoModelForVision2Seq(_BaseAutoModelClass):
606
+ _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
607
+
608
+
609
+ TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc="vision-to-text modeling")
610
+
611
+
612
+ class TFAutoModelForMaskedLM(_BaseAutoModelClass):
613
+ _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
614
+
615
+
616
+ TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling")
617
+
618
+
619
+ class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass):
620
+ _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
621
+
622
+
623
+ TFAutoModelForSeq2SeqLM = auto_class_update(
624
+ TFAutoModelForSeq2SeqLM,
625
+ head_doc="sequence-to-sequence language modeling",
626
+ checkpoint_for_example="google-t5/t5-base",
627
+ )
628
+
629
+
630
+ class TFAutoModelForSequenceClassification(_BaseAutoModelClass):
631
+ _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
632
+
633
+
634
+ TFAutoModelForSequenceClassification = auto_class_update(
635
+ TFAutoModelForSequenceClassification, head_doc="sequence classification"
636
+ )
637
+
638
+
639
+ class TFAutoModelForQuestionAnswering(_BaseAutoModelClass):
640
+ _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
641
+
642
+
643
+ TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering")
644
+
645
+
646
+ class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
647
+ _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
648
+
649
+
650
+ TFAutoModelForDocumentQuestionAnswering = auto_class_update(
651
+ TFAutoModelForDocumentQuestionAnswering,
652
+ head_doc="document question answering",
653
+ checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3',
654
+ )
655
+
656
+
657
+ class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass):
658
+ _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
659
+
660
+
661
+ TFAutoModelForTableQuestionAnswering = auto_class_update(
662
+ TFAutoModelForTableQuestionAnswering,
663
+ head_doc="table question answering",
664
+ checkpoint_for_example="google/tapas-base-finetuned-wtq",
665
+ )
666
+
667
+
668
+ class TFAutoModelForTokenClassification(_BaseAutoModelClass):
669
+ _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
670
+
671
+
672
+ TFAutoModelForTokenClassification = auto_class_update(
673
+ TFAutoModelForTokenClassification, head_doc="token classification"
674
+ )
675
+
676
+
677
+ class TFAutoModelForMultipleChoice(_BaseAutoModelClass):
678
+ _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
679
+
680
+
681
+ TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice")
682
+
683
+
684
+ class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass):
685
+ _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
686
+
687
+
688
+ TFAutoModelForNextSentencePrediction = auto_class_update(
689
+ TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
690
+ )
691
+
692
+
693
+ class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
694
+ _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
695
+
696
+
697
+ TFAutoModelForSpeechSeq2Seq = auto_class_update(
698
+ TFAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
699
+ )
700
+
701
+
702
+ class TFAutoModelWithLMHead(_TFAutoModelWithLMHead):
703
+ @classmethod
704
+ def from_config(cls, config):
705
+ warnings.warn(
706
+ "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use"
707
+ " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models"
708
+ " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.",
709
+ FutureWarning,
710
+ )
711
+ return super().from_config(config)
712
+
713
+ @classmethod
714
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
715
+ warnings.warn(
716
+ "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use"
717
+ " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models"
718
+ " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.",
719
+ FutureWarning,
720
+ )
721
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
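The block above wires each TF head class to its lazy config-to-model mapping and then passes it through `auto_class_update` to attach the generated docstrings; `TFAutoModelWithLMHead` survives only as a deprecated shim that emits a `FutureWarning` and delegates to the same machinery. A minimal usage sketch of the pattern, assuming TensorFlow is installed and using `distilbert-base-uncased-finetuned-sst-2-english` purely as an illustrative sequence-classification checkpoint with TF weights:

```python
# Sketch only: exercising one of the TF auto classes added in the diff above.
# Assumes TensorFlow is installed; the checkpoint name is an illustrative choice, not prescribed by the diff.
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)

# Encode a sentence as TensorFlow tensors, run a forward pass, and take the argmax over the logits.
inputs = tokenizer("The movie was great!", return_tensors="tf")
logits = model(**inputs).logits
predicted_id = int(logits.numpy().argmax(axis=-1)[0])
print(model.config.id2label[predicted_id])
```

New code should go through `TFAutoModelForCausalLM`, `TFAutoModelForMaskedLM`, or `TFAutoModelForSeq2SeqLM` rather than the deprecated `TFAutoModelWithLMHead` wrapper shown at the end of the diff.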
venv/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py ADDED
@@ -0,0 +1,358 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AutoProcessor class."""
16
+ import importlib
17
+ import inspect
18
+ import json
19
+ import os
20
+ import warnings
21
+ from collections import OrderedDict
22
+
23
+ # Build the list of all feature extractors
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...feature_extraction_utils import FeatureExtractionMixin
27
+ from ...image_processing_utils import ImageProcessingMixin
28
+ from ...processing_utils import ProcessorMixin
29
+ from ...tokenization_utils import TOKENIZER_CONFIG_FILE
30
+ from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, get_file_from_repo, logging
31
+ from .auto_factory import _LazyAutoMapping
32
+ from .configuration_auto import (
33
+ CONFIG_MAPPING_NAMES,
34
+ AutoConfig,
35
+ model_type_to_module_name,
36
+ replace_list_option_in_docstrings,
37
+ )
38
+ from .feature_extraction_auto import AutoFeatureExtractor
39
+ from .image_processing_auto import AutoImageProcessor
40
+ from .tokenization_auto import AutoTokenizer
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ PROCESSOR_MAPPING_NAMES = OrderedDict(
46
+ [
47
+ ("align", "AlignProcessor"),
48
+ ("altclip", "AltCLIPProcessor"),
49
+ ("bark", "BarkProcessor"),
50
+ ("blip", "BlipProcessor"),
51
+ ("blip-2", "Blip2Processor"),
52
+ ("bridgetower", "BridgeTowerProcessor"),
53
+ ("chinese_clip", "ChineseCLIPProcessor"),
54
+ ("clap", "ClapProcessor"),
55
+ ("clip", "CLIPProcessor"),
56
+ ("clipseg", "CLIPSegProcessor"),
57
+ ("clvp", "ClvpProcessor"),
58
+ ("flava", "FlavaProcessor"),
59
+ ("fuyu", "FuyuProcessor"),
60
+ ("git", "GitProcessor"),
61
+ ("groupvit", "CLIPProcessor"),
62
+ ("hubert", "Wav2Vec2Processor"),
63
+ ("idefics", "IdeficsProcessor"),
64
+ ("idefics2", "Idefics2Processor"),
65
+ ("instructblip", "InstructBlipProcessor"),
66
+ ("kosmos-2", "Kosmos2Processor"),
67
+ ("layoutlmv2", "LayoutLMv2Processor"),
68
+ ("layoutlmv3", "LayoutLMv3Processor"),
69
+ ("llava", "LlavaProcessor"),
70
+ ("llava_next", "LlavaNextProcessor"),
71
+ ("markuplm", "MarkupLMProcessor"),
72
+ ("mctct", "MCTCTProcessor"),
73
+ ("mgp-str", "MgpstrProcessor"),
74
+ ("oneformer", "OneFormerProcessor"),
75
+ ("owlv2", "Owlv2Processor"),
76
+ ("owlvit", "OwlViTProcessor"),
77
+ ("pix2struct", "Pix2StructProcessor"),
78
+ ("pop2piano", "Pop2PianoProcessor"),
79
+ ("sam", "SamProcessor"),
80
+ ("seamless_m4t", "SeamlessM4TProcessor"),
81
+ ("sew", "Wav2Vec2Processor"),
82
+ ("sew-d", "Wav2Vec2Processor"),
83
+ ("siglip", "SiglipProcessor"),
84
+ ("speech_to_text", "Speech2TextProcessor"),
85
+ ("speech_to_text_2", "Speech2Text2Processor"),
86
+ ("speecht5", "SpeechT5Processor"),
87
+ ("trocr", "TrOCRProcessor"),
88
+ ("tvlt", "TvltProcessor"),
89
+ ("tvp", "TvpProcessor"),
90
+ ("unispeech", "Wav2Vec2Processor"),
91
+ ("unispeech-sat", "Wav2Vec2Processor"),
92
+ ("vilt", "ViltProcessor"),
93
+ ("vipllava", "LlavaProcessor"),
94
+ ("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"),
95
+ ("wav2vec2", "Wav2Vec2Processor"),
96
+ ("wav2vec2-bert", "Wav2Vec2Processor"),
97
+ ("wav2vec2-conformer", "Wav2Vec2Processor"),
98
+ ("wavlm", "Wav2Vec2Processor"),
99
+ ("whisper", "WhisperProcessor"),
100
+ ("xclip", "XCLIPProcessor"),
101
+ ]
102
+ )
103
+
104
+ PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES)
105
+
106
+
107
+ def processor_class_from_name(class_name: str):
108
+ for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
109
+ if class_name in processors:
110
+ module_name = model_type_to_module_name(module_name)
111
+
112
+ module = importlib.import_module(f".{module_name}", "transformers.models")
113
+ try:
114
+ return getattr(module, class_name)
115
+ except AttributeError:
116
+ continue
117
+
118
+ for processor in PROCESSOR_MAPPING._extra_content.values():
119
+ if getattr(processor, "__name__", None) == class_name:
120
+ return processor
121
+
122
+ # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
123
+ # init and we return the proper dummy to get an appropriate error message.
124
+ main_module = importlib.import_module("transformers")
125
+ if hasattr(main_module, class_name):
126
+ return getattr(main_module, class_name)
127
+
128
+ return None
129
+
130
+
131
+ class AutoProcessor:
132
+ r"""
133
+ This is a generic processor class that will be instantiated as one of the processor classes of the library when
134
+ created with the [`AutoProcessor.from_pretrained`] class method.
135
+
136
+ This class cannot be instantiated directly using `__init__()` (throws an error).
137
+ """
138
+
139
+ def __init__(self):
140
+ raise EnvironmentError(
141
+ "AutoProcessor is designed to be instantiated "
142
+ "using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
143
+ )
144
+
145
+ @classmethod
146
+ @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
147
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
148
+ r"""
149
+ Instantiate one of the processor classes of the library from a pretrained model vocabulary.
150
+
151
+ The processor class to instantiate is selected based on the `model_type` property of the config object (either
152
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible):
153
+
154
+ List options
155
+
156
+ Params:
157
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
158
+ This can be either:
159
+
160
+ - a string, the *model id* of a pretrained processor hosted inside a model repo on
161
+ huggingface.co.
162
+ - a path to a *directory* containing processor files saved using the `save_pretrained()` method,
163
+ e.g., `./my_model_directory/`.
164
+ cache_dir (`str` or `os.PathLike`, *optional*):
165
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
166
+ standard cache should not be used.
167
+ force_download (`bool`, *optional*, defaults to `False`):
168
+ Whether or not to force the (re-)download of the feature extractor files and override the cached versions
169
+ if they exist.
170
+ resume_download (`bool`, *optional*, defaults to `False`):
171
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
172
+ exists.
173
+ proxies (`Dict[str, str]`, *optional*):
174
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
175
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
176
+ token (`str` or *bool*, *optional*):
177
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
178
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
179
+ revision (`str`, *optional*, defaults to `"main"`):
180
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
181
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
182
+ identifier allowed by git.
183
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
184
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
185
+ functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
186
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
187
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
188
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
189
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
190
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
191
+ execute code present on the Hub on your local machine.
192
+ kwargs (`Dict[str, Any]`, *optional*):
193
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
194
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
195
+ controlled by the `return_unused_kwargs` keyword parameter.
196
+
197
+ <Tip>
198
+
199
+ Passing `token=True` is required when you want to use a private model.
200
+
201
+ </Tip>
202
+
203
+ Examples:
204
+
205
+ ```python
206
+ >>> from transformers import AutoProcessor
207
+
208
+ >>> # Download processor from huggingface.co and cache.
209
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
210
+
211
+ >>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
212
+ >>> # processor = AutoProcessor.from_pretrained("./test/saved_model/")
213
+ ```"""
214
+ use_auth_token = kwargs.pop("use_auth_token", None)
215
+ if use_auth_token is not None:
216
+ warnings.warn(
217
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
218
+ FutureWarning,
219
+ )
220
+ if kwargs.get("token", None) is not None:
221
+ raise ValueError(
222
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
223
+ )
224
+ kwargs["token"] = use_auth_token
225
+
226
+ config = kwargs.pop("config", None)
227
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
228
+ kwargs["_from_auto"] = True
229
+
230
+ processor_class = None
231
+ processor_auto_map = None
232
+
233
+ # First, let's see if we have a processor or preprocessor config.
234
+ # Filter the kwargs for `get_file_from_repo`.
235
+ get_file_from_repo_kwargs = {
236
+ key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs
237
+ }
238
+
239
+ # Let's start by checking whether the processor class is saved in a processor config
240
+ processor_config_file = get_file_from_repo(
241
+ pretrained_model_name_or_path, PROCESSOR_NAME, **get_file_from_repo_kwargs
242
+ )
243
+ if processor_config_file is not None:
244
+ config_dict, _ = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs)
245
+ processor_class = config_dict.get("processor_class", None)
246
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
247
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
248
+
249
+ if processor_class is None:
250
+ # If not found, let's check whether the processor class is saved in an image processor config
251
+ preprocessor_config_file = get_file_from_repo(
252
+ pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs
253
+ )
254
+ if preprocessor_config_file is not None:
255
+ config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
256
+ processor_class = config_dict.get("processor_class", None)
257
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
258
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
259
+
260
+ # If not found, let's check whether the processor class is saved in a feature extractor config
261
+ if preprocessor_config_file is not None and processor_class is None:
262
+ config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(
263
+ pretrained_model_name_or_path, **kwargs
264
+ )
265
+ processor_class = config_dict.get("processor_class", None)
266
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
267
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
268
+
269
+ if processor_class is None:
270
+ # Next, let's check whether the processor class is saved in a tokenizer
271
+ tokenizer_config_file = get_file_from_repo(
272
+ pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs
273
+ )
274
+ if tokenizer_config_file is not None:
275
+ with open(tokenizer_config_file, encoding="utf-8") as reader:
276
+ config_dict = json.load(reader)
277
+
278
+ processor_class = config_dict.get("processor_class", None)
279
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
280
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
281
+
282
+ if processor_class is None:
283
+ # Otherwise, load config, if it can be loaded.
284
+ if not isinstance(config, PretrainedConfig):
285
+ config = AutoConfig.from_pretrained(
286
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
287
+ )
288
+
289
+ # And check if the config contains the processor class.
290
+ processor_class = getattr(config, "processor_class", None)
291
+ if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map:
292
+ processor_auto_map = config.auto_map["AutoProcessor"]
293
+
294
+ if processor_class is not None:
295
+ processor_class = processor_class_from_name(processor_class)
296
+
297
+ has_remote_code = processor_auto_map is not None
298
+ has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING
299
+ trust_remote_code = resolve_trust_remote_code(
300
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
301
+ )
302
+
303
+ if has_remote_code and trust_remote_code:
304
+ processor_class = get_class_from_dynamic_module(
305
+ processor_auto_map, pretrained_model_name_or_path, **kwargs
306
+ )
307
+ _ = kwargs.pop("code_revision", None)
308
+ if os.path.isdir(pretrained_model_name_or_path):
309
+ processor_class.register_for_auto_class()
310
+ return processor_class.from_pretrained(
311
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
312
+ )
313
+ elif processor_class is not None:
314
+ return processor_class.from_pretrained(
315
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
316
+ )
317
+ # Last try: we use the PROCESSOR_MAPPING.
318
+ elif type(config) in PROCESSOR_MAPPING:
319
+ return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
320
+
321
+ # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a
322
+ # tokenizer.
323
+ try:
324
+ return AutoTokenizer.from_pretrained(
325
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
326
+ )
327
+ except Exception:
328
+ try:
329
+ return AutoImageProcessor.from_pretrained(
330
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
331
+ )
332
+ except Exception:
333
+ pass
334
+
335
+ try:
336
+ return AutoFeatureExtractor.from_pretrained(
337
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
338
+ )
339
+ except Exception:
340
+ pass
341
+
342
+ raise ValueError(
343
+ f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a "
344
+ "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains "
345
+ "the files of at least one of those processing classes."
346
+ )
347
+
348
+ @staticmethod
349
+ def register(config_class, processor_class, exist_ok=False):
350
+ """
351
+ Register a new processor for this class.
352
+
353
+ Args:
354
+ config_class ([`PretrainedConfig`]):
355
+ The configuration corresponding to the model to register.
356
+ processor_class ([`FeatureExtractorMixin`]): The processor to register.
357
+ """
358
+ PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)
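`AutoProcessor.from_pretrained` above resolves the class to instantiate by checking, in order, a dedicated processor config, the image-processor/feature-extractor config, the tokenizer config, and finally the model config, and it falls back to `AutoTokenizer`, `AutoImageProcessor`, and `AutoFeatureExtractor` before giving up. The `register` hook at the end lets a custom config/processor pair take part in that lookup. A hedged sketch, where `MyConfig` and `MyProcessor` are hypothetical placeholders rather than library classes:

```python
# Illustrative only: registering a custom processor with the auto machinery above.
# MyConfig / MyProcessor are hypothetical stand-ins for a user-defined model type.
from transformers import AutoConfig, AutoProcessor, PretrainedConfig, ProcessorMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model_type key


class MyProcessor(ProcessorMixin):
    # A real processor would declare its attributes (tokenizer, image processor, ...)
    # and pass them to super().__init__(); this stub only satisfies the interface.
    attributes = []

    def __init__(self, **kwargs):
        pass


# Make both auto classes aware of the new model type.
AutoConfig.register("my-model", MyConfig)
AutoProcessor.register(MyConfig, MyProcessor)

# From here on, AutoProcessor.from_pretrained on a repo whose config reports
# model_type == "my-model" would resolve to MyProcessor via PROCESSOR_MAPPING.
```

Passing `exist_ok=True` mirrors the keyword exposed in the diff and allows overriding an existing registration without raising an error.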
venv/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py ADDED
@@ -0,0 +1,936 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Tokenizer class."""
16
+
17
+ import importlib
18
+ import json
19
+ import os
20
+ import warnings
21
+ from collections import OrderedDict
22
+ from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
23
+
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...tokenization_utils import PreTrainedTokenizer
27
+ from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE
28
+ from ...utils import (
29
+ cached_file,
30
+ extract_commit_hash,
31
+ is_g2p_en_available,
32
+ is_sentencepiece_available,
33
+ is_tokenizers_available,
34
+ logging,
35
+ )
36
+ from ..encoder_decoder import EncoderDecoderConfig
37
+ from .auto_factory import _LazyAutoMapping
38
+ from .configuration_auto import (
39
+ CONFIG_MAPPING_NAMES,
40
+ AutoConfig,
41
+ config_class_to_model_type,
42
+ model_type_to_module_name,
43
+ replace_list_option_in_docstrings,
44
+ )
45
+
46
+
47
+ if is_tokenizers_available():
48
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
49
+ else:
50
+ PreTrainedTokenizerFast = None
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ if TYPE_CHECKING:
56
+ # This significantly improves completion suggestion performance when
57
+ # the transformers package is used with Microsoft's Pylance language server.
58
+ TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict()
59
+ else:
60
+ TOKENIZER_MAPPING_NAMES = OrderedDict(
61
+ [
62
+ (
63
+ "albert",
64
+ (
65
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
66
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
67
+ ),
68
+ ),
69
+ ("align", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
70
+ ("bark", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
71
+ ("bart", ("BartTokenizer", "BartTokenizerFast")),
72
+ (
73
+ "barthez",
74
+ (
75
+ "BarthezTokenizer" if is_sentencepiece_available() else None,
76
+ "BarthezTokenizerFast" if is_tokenizers_available() else None,
77
+ ),
78
+ ),
79
+ ("bartpho", ("BartphoTokenizer", None)),
80
+ ("bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
81
+ ("bert-generation", ("BertGenerationTokenizer" if is_sentencepiece_available() else None, None)),
82
+ ("bert-japanese", ("BertJapaneseTokenizer", None)),
83
+ ("bertweet", ("BertweetTokenizer", None)),
84
+ (
85
+ "big_bird",
86
+ (
87
+ "BigBirdTokenizer" if is_sentencepiece_available() else None,
88
+ "BigBirdTokenizerFast" if is_tokenizers_available() else None,
89
+ ),
90
+ ),
91
+ ("bigbird_pegasus", ("PegasusTokenizer", "PegasusTokenizerFast" if is_tokenizers_available() else None)),
92
+ ("biogpt", ("BioGptTokenizer", None)),
93
+ ("blenderbot", ("BlenderbotTokenizer", "BlenderbotTokenizerFast")),
94
+ ("blenderbot-small", ("BlenderbotSmallTokenizer", None)),
95
+ ("blip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
96
+ ("blip-2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
97
+ ("bloom", (None, "BloomTokenizerFast" if is_tokenizers_available() else None)),
98
+ ("bridgetower", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
99
+ ("bros", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
100
+ ("byt5", ("ByT5Tokenizer", None)),
101
+ (
102
+ "camembert",
103
+ (
104
+ "CamembertTokenizer" if is_sentencepiece_available() else None,
105
+ "CamembertTokenizerFast" if is_tokenizers_available() else None,
106
+ ),
107
+ ),
108
+ ("canine", ("CanineTokenizer", None)),
109
+ ("chinese_clip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
110
+ (
111
+ "clap",
112
+ (
113
+ "RobertaTokenizer",
114
+ "RobertaTokenizerFast" if is_tokenizers_available() else None,
115
+ ),
116
+ ),
117
+ (
118
+ "clip",
119
+ (
120
+ "CLIPTokenizer",
121
+ "CLIPTokenizerFast" if is_tokenizers_available() else None,
122
+ ),
123
+ ),
124
+ (
125
+ "clipseg",
126
+ (
127
+ "CLIPTokenizer",
128
+ "CLIPTokenizerFast" if is_tokenizers_available() else None,
129
+ ),
130
+ ),
131
+ ("clvp", ("ClvpTokenizer", None)),
132
+ (
133
+ "code_llama",
134
+ (
135
+ "CodeLlamaTokenizer" if is_sentencepiece_available() else None,
136
+ "CodeLlamaTokenizerFast" if is_tokenizers_available() else None,
137
+ ),
138
+ ),
139
+ ("codegen", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)),
140
+ ("cohere", (None, "CohereTokenizerFast" if is_tokenizers_available() else None)),
141
+ ("convbert", ("ConvBertTokenizer", "ConvBertTokenizerFast" if is_tokenizers_available() else None)),
142
+ (
143
+ "cpm",
144
+ (
145
+ "CpmTokenizer" if is_sentencepiece_available() else None,
146
+ "CpmTokenizerFast" if is_tokenizers_available() else None,
147
+ ),
148
+ ),
149
+ ("cpmant", ("CpmAntTokenizer", None)),
150
+ ("ctrl", ("CTRLTokenizer", None)),
151
+ ("data2vec-audio", ("Wav2Vec2CTCTokenizer", None)),
152
+ ("data2vec-text", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
153
+ ("dbrx", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
154
+ ("deberta", ("DebertaTokenizer", "DebertaTokenizerFast" if is_tokenizers_available() else None)),
155
+ (
156
+ "deberta-v2",
157
+ (
158
+ "DebertaV2Tokenizer" if is_sentencepiece_available() else None,
159
+ "DebertaV2TokenizerFast" if is_tokenizers_available() else None,
160
+ ),
161
+ ),
162
+ ("distilbert", ("DistilBertTokenizer", "DistilBertTokenizerFast" if is_tokenizers_available() else None)),
163
+ (
164
+ "dpr",
165
+ (
166
+ "DPRQuestionEncoderTokenizer",
167
+ "DPRQuestionEncoderTokenizerFast" if is_tokenizers_available() else None,
168
+ ),
169
+ ),
170
+ ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)),
171
+ ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
172
+ ("ernie_m", ("ErnieMTokenizer" if is_sentencepiece_available() else None, None)),
173
+ ("esm", ("EsmTokenizer", None)),
174
+ ("falcon", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)),
175
+ (
176
+ "fastspeech2_conformer",
177
+ ("FastSpeech2ConformerTokenizer" if is_g2p_en_available() else None, None),
178
+ ),
179
+ ("flaubert", ("FlaubertTokenizer", None)),
180
+ ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)),
181
+ ("fsmt", ("FSMTTokenizer", None)),
182
+ ("funnel", ("FunnelTokenizer", "FunnelTokenizerFast" if is_tokenizers_available() else None)),
183
+ (
184
+ "gemma",
185
+ (
186
+ "GemmaTokenizer" if is_sentencepiece_available() else None,
187
+ "GemmaTokenizerFast" if is_tokenizers_available() else None,
188
+ ),
189
+ ),
190
+ ("git", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
191
+ ("gpt-sw3", ("GPTSw3Tokenizer" if is_sentencepiece_available() else None, None)),
192
+ ("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
193
+ ("gpt_bigcode", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
194
+ ("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
195
+ ("gpt_neox", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
196
+ ("gpt_neox_japanese", ("GPTNeoXJapaneseTokenizer", None)),
197
+ ("gptj", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
198
+ ("gptsan-japanese", ("GPTSanJapaneseTokenizer", None)),
199
+ ("grounding-dino", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
200
+ ("groupvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
201
+ ("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)),
202
+ ("hubert", ("Wav2Vec2CTCTokenizer", None)),
203
+ ("ibert", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
204
+ ("idefics", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)),
205
+ ("idefics2", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
206
+ ("instructblip", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
207
+ (
208
+ "jamba",
209
+ (
210
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
211
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
212
+ ),
213
+ ),
214
+ ("jukebox", ("JukeboxTokenizer", None)),
215
+ (
216
+ "kosmos-2",
217
+ (
218
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
219
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
220
+ ),
221
+ ),
222
+ ("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)),
223
+ ("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)),
224
+ ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
225
+ ("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)),
226
+ ("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)),
227
+ ("lilt", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
228
+ (
229
+ "llama",
230
+ (
231
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
232
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
233
+ ),
234
+ ),
235
+ ("llava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
236
+ ("llava_next", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
237
+ ("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)),
238
+ (
239
+ "longt5",
240
+ (
241
+ "T5Tokenizer" if is_sentencepiece_available() else None,
242
+ "T5TokenizerFast" if is_tokenizers_available() else None,
243
+ ),
244
+ ),
245
+ ("luke", ("LukeTokenizer", None)),
246
+ ("lxmert", ("LxmertTokenizer", "LxmertTokenizerFast" if is_tokenizers_available() else None)),
247
+ ("m2m_100", ("M2M100Tokenizer" if is_sentencepiece_available() else None, None)),
248
+ ("mamba", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
249
+ ("marian", ("MarianTokenizer" if is_sentencepiece_available() else None, None)),
250
+ (
251
+ "mbart",
252
+ (
253
+ "MBartTokenizer" if is_sentencepiece_available() else None,
254
+ "MBartTokenizerFast" if is_tokenizers_available() else None,
255
+ ),
256
+ ),
257
+ (
258
+ "mbart50",
259
+ (
260
+ "MBart50Tokenizer" if is_sentencepiece_available() else None,
261
+ "MBart50TokenizerFast" if is_tokenizers_available() else None,
262
+ ),
263
+ ),
264
+ ("mega", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
265
+ ("megatron-bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
266
+ ("mgp-str", ("MgpstrTokenizer", None)),
267
+ (
268
+ "mistral",
269
+ (
270
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
271
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
272
+ ),
273
+ ),
274
+ (
275
+ "mixtral",
276
+ (
277
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
278
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
279
+ ),
280
+ ),
281
+ ("mluke", ("MLukeTokenizer" if is_sentencepiece_available() else None, None)),
282
+ ("mobilebert", ("MobileBertTokenizer", "MobileBertTokenizerFast" if is_tokenizers_available() else None)),
283
+ ("mpnet", ("MPNetTokenizer", "MPNetTokenizerFast" if is_tokenizers_available() else None)),
284
+ ("mpt", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
285
+ ("mra", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
286
+ (
287
+ "mt5",
288
+ (
289
+ "MT5Tokenizer" if is_sentencepiece_available() else None,
290
+ "MT5TokenizerFast" if is_tokenizers_available() else None,
291
+ ),
292
+ ),
293
+ ("musicgen", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
294
+ ("musicgen_melody", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
295
+ ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)),
296
+ ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
297
+ (
298
+ "nllb",
299
+ (
300
+ "NllbTokenizer" if is_sentencepiece_available() else None,
301
+ "NllbTokenizerFast" if is_tokenizers_available() else None,
302
+ ),
303
+ ),
304
+ (
305
+ "nllb-moe",
306
+ (
307
+ "NllbTokenizer" if is_sentencepiece_available() else None,
308
+ "NllbTokenizerFast" if is_tokenizers_available() else None,
309
+ ),
310
+ ),
311
+ (
312
+ "nystromformer",
313
+ (
314
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
315
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
316
+ ),
317
+ ),
318
+ ("olmo", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
319
+ ("oneformer", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
320
+ (
321
+ "openai-gpt",
322
+ ("OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" if is_tokenizers_available() else None),
323
+ ),
324
+ ("opt", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
325
+ ("owlv2", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
326
+ ("owlvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
327
+ (
328
+ "pegasus",
329
+ (
330
+ "PegasusTokenizer" if is_sentencepiece_available() else None,
331
+ "PegasusTokenizerFast" if is_tokenizers_available() else None,
332
+ ),
333
+ ),
334
+ (
335
+ "pegasus_x",
336
+ (
337
+ "PegasusTokenizer" if is_sentencepiece_available() else None,
338
+ "PegasusTokenizerFast" if is_tokenizers_available() else None,
339
+ ),
340
+ ),
341
+ (
342
+ "perceiver",
343
+ (
344
+ "PerceiverTokenizer",
345
+ None,
346
+ ),
347
+ ),
348
+ (
349
+ "persimmon",
350
+ (
351
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
352
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
353
+ ),
354
+ ),
355
+ ("phi", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)),
356
+ ("phobert", ("PhobertTokenizer", None)),
357
+ ("pix2struct", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
358
+ ("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)),
359
+ ("prophetnet", ("ProphetNetTokenizer", None)),
360
+ ("qdqbert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
361
+ (
362
+ "qwen2",
363
+ (
364
+ "Qwen2Tokenizer",
365
+ "Qwen2TokenizerFast" if is_tokenizers_available() else None,
366
+ ),
367
+ ),
368
+ (
369
+ "qwen2_moe",
370
+ (
371
+ "Qwen2Tokenizer",
372
+ "Qwen2TokenizerFast" if is_tokenizers_available() else None,
373
+ ),
374
+ ),
375
+ ("rag", ("RagTokenizer", None)),
376
+ ("realm", ("RealmTokenizer", "RealmTokenizerFast" if is_tokenizers_available() else None)),
377
+ (
378
+ "recurrent_gemma",
379
+ (
380
+ "GemmaTokenizer" if is_sentencepiece_available() else None,
381
+ "GemmaTokenizerFast" if is_tokenizers_available() else None,
382
+ ),
383
+ ),
384
+ (
385
+ "reformer",
386
+ (
387
+ "ReformerTokenizer" if is_sentencepiece_available() else None,
388
+ "ReformerTokenizerFast" if is_tokenizers_available() else None,
389
+ ),
390
+ ),
391
+ (
392
+ "rembert",
393
+ (
394
+ "RemBertTokenizer" if is_sentencepiece_available() else None,
395
+ "RemBertTokenizerFast" if is_tokenizers_available() else None,
396
+ ),
397
+ ),
398
+ ("retribert", ("RetriBertTokenizer", "RetriBertTokenizerFast" if is_tokenizers_available() else None)),
399
+ ("roberta", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
400
+ (
401
+ "roberta-prelayernorm",
402
+ ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None),
403
+ ),
404
+ ("roc_bert", ("RoCBertTokenizer", None)),
405
+ ("roformer", ("RoFormerTokenizer", "RoFormerTokenizerFast" if is_tokenizers_available() else None)),
406
+ ("rwkv", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
407
+ (
408
+ "seamless_m4t",
409
+ (
410
+ "SeamlessM4TTokenizer" if is_sentencepiece_available() else None,
411
+ "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None,
412
+ ),
413
+ ),
414
+ (
415
+ "seamless_m4t_v2",
416
+ (
417
+ "SeamlessM4TTokenizer" if is_sentencepiece_available() else None,
418
+ "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None,
419
+ ),
420
+ ),
421
+ ("siglip", ("SiglipTokenizer" if is_sentencepiece_available() else None, None)),
422
+ ("speech_to_text", ("Speech2TextTokenizer" if is_sentencepiece_available() else None, None)),
423
+ ("speech_to_text_2", ("Speech2Text2Tokenizer", None)),
424
+ ("speecht5", ("SpeechT5Tokenizer" if is_sentencepiece_available() else None, None)),
425
+ ("splinter", ("SplinterTokenizer", "SplinterTokenizerFast")),
426
+ (
427
+ "squeezebert",
428
+ ("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None),
429
+ ),
430
+ ("stablelm", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
431
+ ("starcoder2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
432
+ (
433
+ "switch_transformers",
434
+ (
435
+ "T5Tokenizer" if is_sentencepiece_available() else None,
436
+ "T5TokenizerFast" if is_tokenizers_available() else None,
437
+ ),
438
+ ),
439
+ (
440
+ "t5",
441
+ (
442
+ "T5Tokenizer" if is_sentencepiece_available() else None,
443
+ "T5TokenizerFast" if is_tokenizers_available() else None,
444
+ ),
445
+ ),
446
+ ("tapas", ("TapasTokenizer", None)),
447
+ ("tapex", ("TapexTokenizer", None)),
448
+ ("transfo-xl", ("TransfoXLTokenizer", None)),
449
+ ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
450
+ (
451
+ "udop",
452
+ (
453
+ "UdopTokenizer" if is_sentencepiece_available() else None,
454
+ "UdopTokenizerFast" if is_tokenizers_available() else None,
455
+ ),
456
+ ),
457
+ (
458
+ "umt5",
459
+ (
460
+ "T5Tokenizer" if is_sentencepiece_available() else None,
461
+ "T5TokenizerFast" if is_tokenizers_available() else None,
462
+ ),
463
+ ),
464
+ ("vilt", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
465
+ ("vipllava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
466
+ ("visual_bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
467
+ ("vits", ("VitsTokenizer", None)),
468
+ ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)),
469
+ ("wav2vec2-bert", ("Wav2Vec2CTCTokenizer", None)),
470
+ ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)),
471
+ ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)),
472
+ ("whisper", ("WhisperTokenizer", "WhisperTokenizerFast" if is_tokenizers_available() else None)),
473
+ ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
474
+ (
475
+ "xglm",
476
+ (
477
+ "XGLMTokenizer" if is_sentencepiece_available() else None,
478
+ "XGLMTokenizerFast" if is_tokenizers_available() else None,
479
+ ),
480
+ ),
481
+ ("xlm", ("XLMTokenizer", None)),
482
+ ("xlm-prophetnet", ("XLMProphetNetTokenizer" if is_sentencepiece_available() else None, None)),
483
+ (
484
+ "xlm-roberta",
485
+ (
486
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
487
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
488
+ ),
489
+ ),
490
+ (
491
+ "xlm-roberta-xl",
492
+ (
493
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
494
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
495
+ ),
496
+ ),
497
+ (
498
+ "xlnet",
499
+ (
500
+ "XLNetTokenizer" if is_sentencepiece_available() else None,
501
+ "XLNetTokenizerFast" if is_tokenizers_available() else None,
502
+ ),
503
+ ),
504
+ (
505
+ "xmod",
506
+ (
507
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
508
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
509
+ ),
510
+ ),
511
+ (
512
+ "yoso",
513
+ (
514
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
515
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
516
+ ),
517
+ ),
518
+ ]
519
+ )
520
+
521
+ TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES)
522
+
523
+ CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()}
524
+
525
+
526
+ def tokenizer_class_from_name(class_name: str):
527
+ if class_name == "PreTrainedTokenizerFast":
528
+ return PreTrainedTokenizerFast
529
+
530
+ for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items():
531
+ if class_name in tokenizers:
532
+ module_name = model_type_to_module_name(module_name)
533
+
534
+ module = importlib.import_module(f".{module_name}", "transformers.models")
535
+ try:
536
+ return getattr(module, class_name)
537
+ except AttributeError:
538
+ continue
539
+
540
+ for config, tokenizers in TOKENIZER_MAPPING._extra_content.items():
541
+ for tokenizer in tokenizers:
542
+ if getattr(tokenizer, "__name__", None) == class_name:
543
+ return tokenizer
544
+
545
+ # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
546
+ # init and we return the proper dummy to get an appropriate error message.
547
+ main_module = importlib.import_module("transformers")
548
+ if hasattr(main_module, class_name):
549
+ return getattr(main_module, class_name)
550
+
551
+ return None
552
+
553
+
554
+ def get_tokenizer_config(
555
+ pretrained_model_name_or_path: Union[str, os.PathLike],
556
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
557
+ force_download: bool = False,
558
+ resume_download: bool = False,
559
+ proxies: Optional[Dict[str, str]] = None,
560
+ token: Optional[Union[bool, str]] = None,
561
+ revision: Optional[str] = None,
562
+ local_files_only: bool = False,
563
+ subfolder: str = "",
564
+ **kwargs,
565
+ ):
566
+ """
567
+ Loads the tokenizer configuration from a pretrained model tokenizer configuration.
568
+
569
+ Args:
570
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
571
+ This can be either:
572
+
573
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
574
+ huggingface.co.
575
+ - a path to a *directory* containing a configuration file saved using the
576
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
577
+
578
+ cache_dir (`str` or `os.PathLike`, *optional*):
579
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
580
+ cache should not be used.
581
+ force_download (`bool`, *optional*, defaults to `False`):
582
+ Whether or not to force the (re-)download of the configuration files and override the cached versions if they
583
+ exist.
584
+ resume_download (`bool`, *optional*, defaults to `False`):
585
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
586
+ proxies (`Dict[str, str]`, *optional*):
587
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
588
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
589
+ token (`str` or *bool*, *optional*):
590
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
591
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
592
+ revision (`str`, *optional*, defaults to `"main"`):
593
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
594
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
595
+ identifier allowed by git.
596
+ local_files_only (`bool`, *optional*, defaults to `False`):
597
+ If `True`, will only try to load the tokenizer configuration from local files.
598
+ subfolder (`str`, *optional*, defaults to `""`):
599
+ In case the tokenizer config is located inside a subfolder of the model repo on huggingface.co, you can
600
+ specify the folder name here.
601
+
602
+ <Tip>
603
+
604
+ Passing `token=True` is required when you want to use a private model.
605
+
606
+ </Tip>
607
+
608
+ Returns:
609
+ `Dict`: The configuration of the tokenizer.
610
+
611
+ Examples:
612
+
613
+ ```python
614
+ # Download configuration from huggingface.co and cache.
615
+ tokenizer_config = get_tokenizer_config("google-bert/bert-base-uncased")
616
+ # This model does not have a tokenizer config so the result will be an empty dict.
617
+ tokenizer_config = get_tokenizer_config("FacebookAI/xlm-roberta-base")
618
+
619
+ # Save a pretrained tokenizer locally and you can reload its config
620
+ from transformers import AutoTokenizer
621
+
622
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
623
+ tokenizer.save_pretrained("tokenizer-test")
624
+ tokenizer_config = get_tokenizer_config("tokenizer-test")
625
+ ```"""
626
+ use_auth_token = kwargs.pop("use_auth_token", None)
627
+ if use_auth_token is not None:
628
+ warnings.warn(
629
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
630
+ FutureWarning,
631
+ )
632
+ if token is not None:
633
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
634
+ token = use_auth_token
635
+
636
+ commit_hash = kwargs.get("_commit_hash", None)
637
+ resolved_config_file = cached_file(
638
+ pretrained_model_name_or_path,
639
+ TOKENIZER_CONFIG_FILE,
640
+ cache_dir=cache_dir,
641
+ force_download=force_download,
642
+ resume_download=resume_download,
643
+ proxies=proxies,
644
+ token=token,
645
+ revision=revision,
646
+ local_files_only=local_files_only,
647
+ subfolder=subfolder,
648
+ _raise_exceptions_for_gated_repo=False,
649
+ _raise_exceptions_for_missing_entries=False,
650
+ _raise_exceptions_for_connection_errors=False,
651
+ _commit_hash=commit_hash,
652
+ )
653
+ if resolved_config_file is None:
654
+ logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.")
655
+ return {}
656
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
657
+
658
+ with open(resolved_config_file, encoding="utf-8") as reader:
659
+ result = json.load(reader)
660
+ result["_commit_hash"] = commit_hash
661
+ return result
662
+
663
+
664
+ class AutoTokenizer:
665
+ r"""
666
+ This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
667
+ created with the [`AutoTokenizer.from_pretrained`] class method.
668
+
669
+ This class cannot be instantiated directly using `__init__()` (throws an error).
670
+ """
671
+
672
+ def __init__(self):
673
+ raise EnvironmentError(
674
+ "AutoTokenizer is designed to be instantiated "
675
+ "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
676
+ )
677
+
678
+ @classmethod
679
+ @replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
680
+ def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
681
+ r"""
682
+ Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
683
+
684
+ The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
685
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
686
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
687
+
688
+ List options
689
+
690
+ Params:
691
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
692
+ Can be either:
693
+
694
+ - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
695
+ - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
696
+ using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
697
+ - A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
698
+ single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
699
+ applicable to all derived classes)
700
+ inputs (additional positional arguments, *optional*):
701
+ Will be passed along to the Tokenizer `__init__()` method.
702
+ config ([`PretrainedConfig`], *optional*)
703
+ The configuration object used to determine the tokenizer class to instantiate.
704
+ cache_dir (`str` or `os.PathLike`, *optional*):
705
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
706
+ standard cache should not be used.
707
+ force_download (`bool`, *optional*, defaults to `False`):
708
+ Whether or not to force the (re-)download of the model weights and configuration files and override the
709
+ cached versions if they exist.
710
+ resume_download (`bool`, *optional*, defaults to `False`):
711
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
712
+ file exists.
713
+ proxies (`Dict[str, str]`, *optional*):
714
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
715
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
716
+ revision (`str`, *optional*, defaults to `"main"`):
717
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
718
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
719
+ identifier allowed by git.
720
+ subfolder (`str`, *optional*):
721
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
722
+ facebook/rag-token-base), specify it here.
723
+ use_fast (`bool`, *optional*, defaults to `True`):
724
+ Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for
725
+ a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer
726
+ is returned instead.
727
+ tokenizer_type (`str`, *optional*):
728
+ Tokenizer type to be loaded.
729
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
730
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
731
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
732
+ execute code present on the Hub on your local machine.
733
+ kwargs (additional keyword arguments, *optional*):
734
+ Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
735
+ `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
736
+ `additional_special_tokens`. See parameters in the `__init__()` for more details.
737
+
738
+ Examples:
739
+
740
+ ```python
741
+ >>> from transformers import AutoTokenizer
742
+
743
+ >>> # Download vocabulary from huggingface.co and cache.
744
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
745
+
746
+ >>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
747
+ >>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
748
+
749
+ >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
750
+ >>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
751
+
752
+ >>> # Download vocabulary from huggingface.co and define model-specific arguments
753
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", add_prefix_space=True)
754
+ ```"""
755
+ use_auth_token = kwargs.pop("use_auth_token", None)
756
+ if use_auth_token is not None:
757
+ warnings.warn(
758
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
759
+ FutureWarning,
760
+ )
761
+ if kwargs.get("token", None) is not None:
762
+ raise ValueError(
763
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
764
+ )
765
+ kwargs["token"] = use_auth_token
766
+
767
+ config = kwargs.pop("config", None)
768
+ kwargs["_from_auto"] = True
769
+
770
+ use_fast = kwargs.pop("use_fast", True)
771
+ tokenizer_type = kwargs.pop("tokenizer_type", None)
772
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
773
+
774
+ # First, let's see whether the tokenizer_type is passed so that we can leverage it
775
+ if tokenizer_type is not None:
776
+ tokenizer_class = None
777
+ tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
778
+
779
+ if tokenizer_class_tuple is None:
780
+ raise ValueError(
781
+ f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of "
782
+ f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}."
783
+ )
784
+
785
+ tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple
786
+
787
+ if use_fast:
788
+ if tokenizer_fast_class_name is not None:
789
+ tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
790
+ else:
791
+ logger.warning(
792
+ "`use_fast` is set to `True` but the tokenizer class does not have a fast version. "
793
+ " Falling back to the slow version."
794
+ )
795
+ if tokenizer_class is None:
796
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
797
+
798
+ if tokenizer_class is None:
799
+ raise ValueError(f"Tokenizer class {tokenizer_class_name} is not currently imported.")
800
+
801
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
802
+
803
+ # Next, let's try to use the tokenizer_config file to get the tokenizer class.
804
+ tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
805
+ if "_commit_hash" in tokenizer_config:
806
+ kwargs["_commit_hash"] = tokenizer_config["_commit_hash"]
807
+ config_tokenizer_class = tokenizer_config.get("tokenizer_class")
808
+ tokenizer_auto_map = None
809
+ if "auto_map" in tokenizer_config:
810
+ if isinstance(tokenizer_config["auto_map"], (tuple, list)):
811
+ # Legacy format for dynamic tokenizers
812
+ tokenizer_auto_map = tokenizer_config["auto_map"]
813
+ else:
814
+ tokenizer_auto_map = tokenizer_config["auto_map"].get("AutoTokenizer", None)
815
+
816
+ # If that did not work, let's try to use the config.
817
+ if config_tokenizer_class is None:
818
+ if not isinstance(config, PretrainedConfig):
819
+ config = AutoConfig.from_pretrained(
820
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
821
+ )
822
+ config_tokenizer_class = config.tokenizer_class
823
+ if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map:
824
+ tokenizer_auto_map = config.auto_map["AutoTokenizer"]
825
+
826
+ has_remote_code = tokenizer_auto_map is not None
827
+ has_local_code = type(config) in TOKENIZER_MAPPING or (
828
+ config_tokenizer_class is not None
829
+ and (
830
+ tokenizer_class_from_name(config_tokenizer_class) is not None
831
+ or tokenizer_class_from_name(config_tokenizer_class + "Fast") is not None
832
+ )
833
+ )
834
+ trust_remote_code = resolve_trust_remote_code(
835
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
836
+ )
837
+
838
+ if has_remote_code and trust_remote_code:
839
+ if use_fast and tokenizer_auto_map[1] is not None:
840
+ class_ref = tokenizer_auto_map[1]
841
+ else:
842
+ class_ref = tokenizer_auto_map[0]
843
+ tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
844
+ _ = kwargs.pop("code_revision", None)
845
+ if os.path.isdir(pretrained_model_name_or_path):
846
+ tokenizer_class.register_for_auto_class()
847
+ return tokenizer_class.from_pretrained(
848
+ pretrained_model_name_or_path, *inputs, trust_remote_code=trust_remote_code, **kwargs
849
+ )
850
+ elif config_tokenizer_class is not None:
851
+ tokenizer_class = None
852
+ if use_fast and not config_tokenizer_class.endswith("Fast"):
853
+ tokenizer_class_candidate = f"{config_tokenizer_class}Fast"
854
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
855
+ if tokenizer_class is None:
856
+ tokenizer_class_candidate = config_tokenizer_class
857
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
858
+ if tokenizer_class is None:
859
+ raise ValueError(
860
+ f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
861
+ )
862
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
863
+
864
+ # Otherwise we have to be creative.
865
+ # if model is an encoder decoder, the encoder tokenizer class is used by default
866
+ if isinstance(config, EncoderDecoderConfig):
867
+ if type(config.decoder) is not type(config.encoder): # noqa: E721
868
+ logger.warning(
869
+ f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
870
+ f"config class: {config.decoder.__class__}. It is not recommended to use the "
871
+ "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
872
+ "specific tokenizer classes."
873
+ )
874
+ config = config.encoder
875
+
876
+ model_type = config_class_to_model_type(type(config).__name__)
877
+ if model_type is not None:
878
+ tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
879
+ if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
880
+ return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
881
+ else:
882
+ if tokenizer_class_py is not None:
883
+ return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
884
+ else:
885
+ raise ValueError(
886
+ "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
887
+ "in order to use this tokenizer."
888
+ )
889
+
890
+ raise ValueError(
891
+ f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n"
892
+ f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}."
893
+ )
894
+
895
+ def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):
896
+ """
897
+ Register a new tokenizer in this mapping.
898
+
899
+
900
+ Args:
901
+ config_class ([`PretrainedConfig`]):
902
+ The configuration corresponding to the model to register.
903
+ slow_tokenizer_class ([`PreTrainedTokenizer`], *optional*):
904
+ The slow tokenizer to register.
905
+ fast_tokenizer_class ([`PreTrainedTokenizerFast`], *optional*):
906
+ The fast tokenizer to register.
907
+ """
908
+ if slow_tokenizer_class is None and fast_tokenizer_class is None:
909
+ raise ValueError("You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class`.")
910
+ if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
911
+ raise ValueError("You passed a fast tokenizer in the `slow_tokenizer_class`.")
912
+ if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
913
+ raise ValueError("You passed a slow tokenizer in the `fast_tokenizer_class`.")
914
+
915
+ if (
916
+ slow_tokenizer_class is not None
917
+ and fast_tokenizer_class is not None
918
+ and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast)
919
+ and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class
920
+ ):
921
+ raise ValueError(
922
+ "The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not "
923
+ "consistent with the slow tokenizer class you passed (fast tokenizer has "
924
+ f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}). Fix one of those "
925
+ "so they match!"
926
+ )
927
+
928
+ # Avoid resetting a set slow/fast tokenizer if we are passing just the other ones.
929
+ if config_class in TOKENIZER_MAPPING._extra_content:
930
+ existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
931
+ if slow_tokenizer_class is None:
932
+ slow_tokenizer_class = existing_slow
933
+ if fast_tokenizer_class is None:
934
+ fast_tokenizer_class = existing_fast
935
+
936
+ TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok)
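The resolution order implemented above (explicit `tokenizer_type`, then `tokenizer_config.json`, then the model config, then the `TOKENIZER_MAPPING` fallback) and the `register` hook can be exercised with a short sketch. This is illustrative only; `ToyConfig` and `ToyTokenizer` are hypothetical placeholder classes, not part of the library.

```python
from transformers import AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

# An explicit tokenizer_type short-circuits the config lookup; use_fast=False forces
# the slow (pure-Python) class even when a Rust-backed one exists.
slow_bert_tok = AutoTokenizer.from_pretrained(
    "google-bert/bert-base-uncased", tokenizer_type="bert", use_fast=False
)


# Hypothetical classes used only to show AutoTokenizer.register(); a real tokenizer
# would implement the vocabulary/tokenization methods that are omitted here.
class ToyConfig(PretrainedConfig):
    model_type = "toy"


class ToyTokenizer(PreTrainedTokenizer):
    pass


# Adds (ToyConfig -> (ToyTokenizer, None)) to TOKENIZER_MAPPING so the mapping-based
# branch at the end of from_pretrained() can resolve it.
AutoTokenizer.register(ToyConfig, slow_tokenizer_class=ToyTokenizer)
```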
venv/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py ADDED
@@ -0,0 +1,117 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
28
+ "tokenization_lxmert": ["LxmertTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_lxmert"] = [
46
+ "LxmertEncoder",
47
+ "LxmertForPreTraining",
48
+ "LxmertForQuestionAnswering",
49
+ "LxmertModel",
50
+ "LxmertPreTrainedModel",
51
+ "LxmertVisualFeatureEncoder",
52
+ "LxmertXLayer",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_lxmert"] = [
62
+ "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFLxmertForPreTraining",
64
+ "TFLxmertMainLayer",
65
+ "TFLxmertModel",
66
+ "TFLxmertPreTrainedModel",
67
+ "TFLxmertVisualFeatureEncoder",
68
+ ]
69
+
70
+
71
+ if TYPE_CHECKING:
72
+ from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
73
+ from .tokenization_lxmert import LxmertTokenizer
74
+
75
+ try:
76
+ if not is_tokenizers_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ from .tokenization_lxmert_fast import LxmertTokenizerFast
82
+
83
+ try:
84
+ if not is_torch_available():
85
+ raise OptionalDependencyNotAvailable()
86
+ except OptionalDependencyNotAvailable:
87
+ pass
88
+ else:
89
+ from .modeling_lxmert import (
90
+ LxmertEncoder,
91
+ LxmertForPreTraining,
92
+ LxmertForQuestionAnswering,
93
+ LxmertModel,
94
+ LxmertPreTrainedModel,
95
+ LxmertVisualFeatureEncoder,
96
+ LxmertXLayer,
97
+ )
98
+
99
+ try:
100
+ if not is_tf_available():
101
+ raise OptionalDependencyNotAvailable()
102
+ except OptionalDependencyNotAvailable:
103
+ pass
104
+ else:
105
+ from .modeling_tf_lxmert import (
106
+ TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
107
+ TFLxmertForPreTraining,
108
+ TFLxmertMainLayer,
109
+ TFLxmertModel,
110
+ TFLxmertPreTrainedModel,
111
+ TFLxmertVisualFeatureEncoder,
112
+ )
113
+
114
+ else:
115
+ import sys
116
+
117
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
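As a quick illustration of what the `_LazyModule` wiring above provides (a sketch, not library code): attribute access, rather than package import, is what pulls in the heavy submodules, and names guarded by an unavailable backend are simply never registered.

```python
import importlib

# Importing the package is cheap: nothing under transformers.models.lxmert is loaded yet.
lxmert = importlib.import_module("transformers.models.lxmert")

# Accessing LxmertConfig triggers only `configuration_lxmert` to be imported.
config_cls = lxmert.LxmertConfig

# Accessing LxmertModel imports `modeling_lxmert`, which needs torch. If torch is not
# installed, "modeling_lxmert" is never added to _import_structure above, so this
# attribute lookup fails instead of the whole package import failing.
model_cls = lxmert.LxmertModel
```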
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.67 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc ADDED
Binary file (7.91 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc ADDED
Binary file (45.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc ADDED
Binary file (51.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc ADDED
Binary file (17 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc ADDED
Binary file (6.67 kB).
 
venv/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py ADDED
@@ -0,0 +1,170 @@
1
+ # coding=utf-8
2
+ # Copyright 2018, Hao Tan, Mohit Bansal
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LXMERT model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class LxmertConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used
31
+ to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
32
+ a configuration with the defaults will yield a similar configuration to that of the Lxmert
33
+ [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 30522):
41
+ Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_qa_labels (`int`, *optional*, defaults to 9500):
48
+ This represents the total number of different question answering (QA) labels there are. If using more than
49
+ one dataset with QA, the user will need to account for the total number of labels that all of the datasets
50
+ have in total.
51
+ num_object_labels (`int`, *optional*, defaults to 1600):
52
+ This represents the total number of semantically unique objects that lxmert will be able to classify a
53
+ pooled-object feature as belonging to.
54
+ num_attr_labels (`int`, *optional*, defaults to 400):
55
+ This represents the total number of semantically unique attributes that lxmert will be able to classify a
56
+ pooled-object feature as possessing.
57
+ intermediate_size (`int`, *optional*, defaults to 3072):
58
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
59
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
60
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
61
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
62
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
64
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the attention probabilities.
66
+ max_position_embeddings (`int`, *optional*, defaults to 512):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ type_vocab_size (`int`, *optional*, defaults to 2):
70
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
74
+ The epsilon used by the layer normalization layers.
75
+ l_layers (`int`, *optional*, defaults to 9):
76
+ Number of hidden layers in the Transformer language encoder.
77
+ x_layers (`int`, *optional*, defaults to 5):
78
+ Number of hidden layers in the Transformer cross modality encoder.
79
+ r_layers (`int`, *optional*, defaults to 5):
80
+ Number of hidden layers in the Transformer visual encoder.
81
+ visual_feat_dim (`int`, *optional*, defaults to 2048):
82
+ This represents the last dimension of the pooled-object features used as input for the model, representing
83
+ the size of each object feature itself.
84
+ visual_pos_dim (`int`, *optional*, defaults to 4):
85
+ This represents the number of spatial features that are mixed into the visual features. The default is set
86
+ to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
87
+ visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
88
+ This represents the scaling factor in which each visual loss is multiplied by if during pretraining, one
89
+ decided to train with multiple vision-based loss objectives.
90
+ task_matched (`bool`, *optional*, defaults to `True`):
91
+ This task is used for sentence-image matching. If the sentence correctly describes the image the label will
92
+ be 1. If the sentence does not correctly describe the image, the label will be 0.
93
+ task_mask_lm (`bool`, *optional*, defaults to `True`):
94
+ Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
95
+ objective.
96
+ task_obj_predict (`bool`, *optional*, defaults to `True`):
97
+ Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
98
+ task_qa (`bool`, *optional*, defaults to `True`):
99
+ Whether or not to add the question-answering loss to the objective
100
+ visual_obj_loss (`bool`, *optional*, defaults to `True`):
101
+ Whether or not to calculate the object-prediction loss objective
102
+ visual_attr_loss (`bool`, *optional*, defaults to `True`):
103
+ Whether or not to calculate the attribute-prediction loss objective
104
+ visual_feat_loss (`bool`, *optional*, defaults to `True`):
105
+ Whether or not to calculate the feature-regression loss objective
106
+ """
107
+
108
+ model_type = "lxmert"
109
+ attribute_map = {}
110
+
111
+ def __init__(
112
+ self,
113
+ vocab_size=30522,
114
+ hidden_size=768,
115
+ num_attention_heads=12,
116
+ num_qa_labels=9500,
117
+ num_object_labels=1600,
118
+ num_attr_labels=400,
119
+ intermediate_size=3072,
120
+ hidden_act="gelu",
121
+ hidden_dropout_prob=0.1,
122
+ attention_probs_dropout_prob=0.1,
123
+ max_position_embeddings=512,
124
+ type_vocab_size=2,
125
+ initializer_range=0.02,
126
+ layer_norm_eps=1e-12,
127
+ l_layers=9,
128
+ x_layers=5,
129
+ r_layers=5,
130
+ visual_feat_dim=2048,
131
+ visual_pos_dim=4,
132
+ visual_loss_normalizer=6.67,
133
+ task_matched=True,
134
+ task_mask_lm=True,
135
+ task_obj_predict=True,
136
+ task_qa=True,
137
+ visual_obj_loss=True,
138
+ visual_attr_loss=True,
139
+ visual_feat_loss=True,
140
+ **kwargs,
141
+ ):
142
+ self.vocab_size = vocab_size
143
+ self.hidden_size = hidden_size
144
+ self.num_attention_heads = num_attention_heads
145
+ self.hidden_act = hidden_act
146
+ self.intermediate_size = intermediate_size
147
+ self.hidden_dropout_prob = hidden_dropout_prob
148
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
149
+ self.max_position_embeddings = max_position_embeddings
150
+ self.type_vocab_size = type_vocab_size
151
+ self.initializer_range = initializer_range
152
+ self.layer_norm_eps = layer_norm_eps
153
+ self.num_qa_labels = num_qa_labels
154
+ self.num_object_labels = num_object_labels
155
+ self.num_attr_labels = num_attr_labels
156
+ self.l_layers = l_layers
157
+ self.x_layers = x_layers
158
+ self.r_layers = r_layers
159
+ self.visual_feat_dim = visual_feat_dim
160
+ self.visual_pos_dim = visual_pos_dim
161
+ self.visual_loss_normalizer = visual_loss_normalizer
162
+ self.task_matched = task_matched
163
+ self.task_mask_lm = task_mask_lm
164
+ self.task_obj_predict = task_obj_predict
165
+ self.task_qa = task_qa
166
+ self.visual_obj_loss = visual_obj_loss
167
+ self.visual_attr_loss = visual_attr_loss
168
+ self.visual_feat_loss = visual_feat_loss
169
+ self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
170
+ super().__init__(**kwargs)
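A minimal usage sketch for the configuration above, assuming only what the `__init__` shows: the per-modality depths are exposed both as individual attributes and as the `num_hidden_layers` dict built on the last line.

```python
from transformers import LxmertConfig

# Shrink the three encoders relative to the defaults (9 language, 5 cross, 5 visual).
config = LxmertConfig(l_layers=3, x_layers=2, r_layers=2, num_qa_labels=100)

print(config.l_layers, config.x_layers, config.r_layers)  # 3 2 2
print(config.num_hidden_layers)  # {'vision': 2, 'cross_encoder': 2, 'language': 3}
```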
venv/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,60 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert LXMERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
30
+ # Initialise PyTorch model
31
+ config = LxmertConfig.from_json_file(config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = LxmertForPreTraining(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ args = parser.parse_args()
60
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
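The same conversion can be driven from Python instead of the CLI; the checkpoint, config, and output paths below are hypothetical placeholders, and TensorFlow must be installed for `load_tf_weights_in_lxmert` to work.

```python
from transformers.models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# Hypothetical paths; point them at a real TF LXMERT checkpoint, its config JSON,
# and the desired output file.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/lxmert/model.ckpt",
    config_file="/path/to/lxmert/config.json",
    pytorch_dump_path="/path/to/lxmert/pytorch_model.bin",
)
```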
venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py ADDED
@@ -0,0 +1,1434 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LXMERT model."""
16
+
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import Dict, Optional, Tuple, Union
23
+
24
+ import torch
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss, SmoothL1Loss
27
+
28
+ from ...activations import ACT2FN, gelu
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_lxmert import LxmertConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
44
+ _CONFIG_FOR_DOC = "LxmertConfig"
45
+
46
+
47
+ class GeLU(nn.Module):
48
+ def __init__(self):
49
+ super().__init__()
50
+
51
+ def forward(self, x):
52
+ return gelu(x)
53
+
54
+
55
+ @dataclass
56
+ class LxmertModelOutput(ModelOutput):
57
+ """
58
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
59
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
60
+ encoder)
61
+
62
+
63
+ Args:
64
+ language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
65
+ Sequence of hidden-states at the output of the last layer of the language encoder.
66
+ vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
67
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
68
+ pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
69
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
70
+ by a Linear layer and a Tanh activation function. The Linear
71
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
72
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
73
+ shape `(batch_size, sequence_length, hidden_size)`.
74
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
75
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
76
+ shape `(batch_size, sequence_length, hidden_size)`.
77
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
78
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
79
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
80
+ the self-attention heads.
81
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
82
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
83
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
84
+ the self-attention heads.
85
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
86
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
87
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
88
+ the self-attention heads.
89
+ """
90
+
91
+ language_output: Optional[torch.FloatTensor] = None
92
+ vision_output: Optional[torch.FloatTensor] = None
93
+ pooled_output: Optional[torch.FloatTensor] = None
94
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
95
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
96
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
97
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
98
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
99
+
100
+
101
+ @dataclass
102
+ class LxmertForQuestionAnsweringOutput(ModelOutput):
103
+ """
104
+ Output type of [`LxmertForQuestionAnswering`].
105
+
106
+ Args:
107
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
108
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
109
+ (classification) loss.
110
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
111
+ Prediction scores of question answering objective (classification).
112
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
113
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
114
+ shape `(batch_size, sequence_length, hidden_size)`.
115
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
116
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
117
+ shape `(batch_size, sequence_length, hidden_size)`.
118
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
119
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
120
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
121
+ the self-attention heads.
122
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
123
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
124
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
125
+ the self-attention heads.
126
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
127
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
128
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
129
+ the self-attention heads.
130
+ """
131
+
132
+ loss: Optional[torch.FloatTensor] = None
133
+ question_answering_score: Optional[torch.FloatTensor] = None
134
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
135
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
136
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
137
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
138
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
139
+
140
+
141
+ @dataclass
142
+ class LxmertForPreTrainingOutput(ModelOutput):
143
+ """
144
+ Output type of [`LxmertForPreTraining`].
145
+
146
+ Args:
147
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
148
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
149
+ (classification) loss.
150
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
151
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
152
+ cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`):
153
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
154
+ continuation before SoftMax).
155
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`):
156
+ Prediction scores of question answering objective (classification).
157
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
158
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
159
+ shape `(batch_size, sequence_length, hidden_size)`.
160
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
161
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
162
+ shape `(batch_size, sequence_length, hidden_size)`.
163
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
164
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
165
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
166
+ the self-attention heads.
167
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
168
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
169
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
170
+ the self-attention heads.
171
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
172
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
173
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
174
+ the self-attention heads.
175
+
176
+ """
177
+
178
+ loss: Optional[torch.FloatTensor] = None
179
+ prediction_logits: Optional[torch.FloatTensor] = None
180
+ cross_relationship_score: Optional[torch.FloatTensor] = None
181
+ question_answering_score: Optional[torch.FloatTensor] = None
182
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
183
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
184
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
185
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
186
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
187
+
188
+
189
+ def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
190
+ """Load tf checkpoints in a pytorch model."""
191
+ try:
192
+ import re
193
+
194
+ import numpy as np
195
+ import tensorflow as tf
196
+ except ImportError:
197
+ logger.error(
198
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
199
+ "https://www.tensorflow.org/install/ for installation instructions."
200
+ )
201
+ raise
202
+ tf_path = os.path.abspath(tf_checkpoint_path)
203
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
204
+ # Load weights from TF model
205
+ init_vars = tf.train.list_variables(tf_path)
206
+ names = []
207
+ arrays = []
208
+ for name, shape in init_vars:
209
+ logger.info(f"Loading TF weight {name} with shape {shape}")
210
+ array = tf.train.load_variable(tf_path, name)
211
+ names.append(name)
212
+ arrays.append(array)
213
+
214
+ for name, array in zip(names, arrays):
215
+ name = name.split("/")
216
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
217
+ # which are not required for using pretrained model
218
+ if any(
219
+ n
220
+ in [
221
+ "adam_v",
222
+ "adam_m",
223
+ "AdamWeightDecayOptimizer",
224
+ "AdamWeightDecayOptimizer_1",
225
+ "global_step",
226
+ ]
227
+ for n in name
228
+ ):
229
+ logger.info(f"Skipping {'/'.join(name)}")
230
+ continue
231
+ pointer = model
232
+ for m_name in name:
233
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
234
+ scope_names = re.split(r"_(\d+)", m_name)
235
+ else:
236
+ scope_names = [m_name]
237
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
238
+ pointer = getattr(pointer, "weight")
239
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
240
+ pointer = getattr(pointer, "bias")
241
+ elif scope_names[0] == "output_weights":
242
+ pointer = getattr(pointer, "weight")
243
+ elif scope_names[0] == "squad":
244
+ pointer = getattr(pointer, "classifier")
245
+ else:
246
+ try:
247
+ pointer = getattr(pointer, scope_names[0])
248
+ except AttributeError:
249
+ logger.info(f"Skipping {'/'.join(name)}")
250
+ continue
251
+ if len(scope_names) >= 2:
252
+ num = int(scope_names[1])
253
+ pointer = pointer[num]
254
+ if m_name[-11:] == "_embeddings":
255
+ pointer = getattr(pointer, "weight")
256
+ elif m_name == "kernel":
257
+ array = np.transpose(array)
258
+ try:
259
+ assert pointer.shape == array.shape
260
+ except AssertionError as e:
261
+ e.args += (pointer.shape, array.shape)
262
+ raise
263
+ logger.info(f"Initialize PyTorch weight {name}")
264
+ pointer.data = torch.from_numpy(array)
265
+ return model
266
+
267
+
268
+ class LxmertEmbeddings(nn.Module):
269
+ """Construct the embeddings from word, position and token_type embeddings."""
270
+
271
+ def __init__(self, config):
272
+ super().__init__()
273
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
274
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
275
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
276
+
277
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
278
+ # any TensorFlow checkpoint file
279
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
280
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
281
+
282
+ def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
283
+ if input_ids is not None:
284
+ input_shape = input_ids.size()
285
+ device = input_ids.device
286
+ else:
287
+ input_shape = inputs_embeds.size()[:-1]
288
+ device = inputs_embeds.device
289
+ seq_length = input_shape[1]
290
+
291
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
292
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
293
+
294
+ if token_type_ids is None:
295
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)  # use the device computed above; this module defines no position_ids buffer
296
+
297
+ if inputs_embeds is None:
298
+ inputs_embeds = self.word_embeddings(input_ids)
299
+ position_embeddings = self.position_embeddings(position_ids)
300
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
301
+
302
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
303
+ embeddings = self.LayerNorm(embeddings)
304
+ embeddings = self.dropout(embeddings)
305
+ return embeddings
306
+
307
+
308
+ class LxmertAttention(nn.Module):
309
+ def __init__(self, config, ctx_dim=None):
310
+ super().__init__()
311
+ if config.hidden_size % config.num_attention_heads != 0:
312
+ raise ValueError(
313
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
314
+ f"heads ({config.num_attention_heads})"
315
+ )
316
+ self.num_attention_heads = config.num_attention_heads
317
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
318
+ self.head_size = self.num_attention_heads * self.attention_head_size
319
+
320
+ # visual_dim = 2048
321
+ if ctx_dim is None:
322
+ ctx_dim = config.hidden_size
323
+ self.query = nn.Linear(config.hidden_size, self.head_size)
324
+ self.key = nn.Linear(ctx_dim, self.head_size)
325
+ self.value = nn.Linear(ctx_dim, self.head_size)
326
+
327
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
328
+
329
+ def transpose_for_scores(self, x):
330
+ new_x_shape = x.size()[:-1] + (
331
+ self.num_attention_heads,
332
+ self.attention_head_size,
333
+ )
334
+ x = x.view(new_x_shape)
335
+ return x.permute(0, 2, 1, 3)
336
+
337
+ def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
338
+ mixed_query_layer = self.query(hidden_states)
339
+ mixed_key_layer = self.key(context)
340
+ mixed_value_layer = self.value(context)
341
+
342
+ query_layer = self.transpose_for_scores(mixed_query_layer)
343
+ key_layer = self.transpose_for_scores(mixed_key_layer)
344
+ value_layer = self.transpose_for_scores(mixed_value_layer)
345
+
346
+ # Take the dot product between "query" and "key" to get the raw attention scores.
347
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
348
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
349
+ # Apply the attention mask (precomputed for all layers in the LxmertModel forward() function)
350
+ if attention_mask is not None:
351
+ attention_scores = attention_scores + attention_mask
352
+
353
+ # Normalize the attention scores to probabilities.
354
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
355
+
356
+ # This is actually dropping out entire tokens to attend to, which might
357
+ # seem a bit unusual, but is taken from the original Transformer paper.
358
+ attention_probs = self.dropout(attention_probs)
359
+
360
+ context_layer = torch.matmul(attention_probs, value_layer)
361
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
362
+ new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
363
+ context_layer = context_layer.view(new_context_layer_shape)
364
+
365
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
366
+ return outputs
367
+
368
+
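As an aside (a sketch, not part of the file): `LxmertAttention` is plain scaled dot-product attention in which queries come from `hidden_states` and keys/values from `context`, so the same module serves both self- and cross-attention. A quick shape check:

```python
import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertAttention

config = LxmertConfig()                # hidden_size=768, 12 heads by default
attn = LxmertAttention(config)

lang = torch.randn(2, 20, 768)         # (batch, language tokens, hidden)
visn = torch.randn(2, 36, 768)         # (batch, visual regions, hidden)

(out,) = attn(lang, visn)              # language queries over visual keys/values
print(out.shape)                       # torch.Size([2, 20, 768])
```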
369
+ class LxmertAttentionOutput(nn.Module):
370
+ def __init__(self, config):
371
+ super().__init__()
372
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
373
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
374
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
375
+
376
+ def forward(self, hidden_states, input_tensor):
377
+ hidden_states = self.dense(hidden_states)
378
+ hidden_states = self.dropout(hidden_states)
379
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
380
+ return hidden_states
381
+
382
+
383
+ class LxmertCrossAttentionLayer(nn.Module):
384
+ def __init__(self, config):
385
+ super().__init__()
386
+ self.att = LxmertAttention(config)
387
+ self.output = LxmertAttentionOutput(config)
388
+
389
+ def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
390
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
391
+ if output_attentions:
392
+ attention_probs = output[1]
393
+ attention_output = self.output(output[0], input_tensor)
394
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
395
+ return outputs
396
+
397
+
398
+ class LxmertSelfAttentionLayer(nn.Module):
399
+ def __init__(self, config):
400
+ super().__init__()
401
+ self.self = LxmertAttention(config)
402
+ self.output = LxmertAttentionOutput(config)
403
+
404
+ def forward(self, input_tensor, attention_mask, output_attentions=False):
405
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
406
+ output = self.self(
407
+ input_tensor,
408
+ input_tensor,
409
+ attention_mask,
410
+ output_attentions=output_attentions,
411
+ )
412
+ if output_attentions:
413
+ attention_probs = output[1]
414
+ attention_output = self.output(output[0], input_tensor)
415
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
416
+ return outputs
417
+
418
+
419
+ class LxmertIntermediate(nn.Module):
420
+ def __init__(self, config):
421
+ super().__init__()
422
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
423
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
424
+
425
+ def forward(self, hidden_states):
426
+ hidden_states = self.dense(hidden_states)
427
+ hidden_states = self.intermediate_act_fn(hidden_states)
428
+ return hidden_states
429
+
430
+
431
+ class LxmertOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states, input_tensor):
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ class LxmertLayer(nn.Module):
446
+ def __init__(self, config):
447
+ super().__init__()
448
+ self.attention = LxmertSelfAttentionLayer(config)
449
+ self.intermediate = LxmertIntermediate(config)
450
+ self.output = LxmertOutput(config)
451
+
452
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
453
+ outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
454
+ attention_output = outputs[0]
455
+ intermediate_output = self.intermediate(attention_output)
456
+ layer_output = self.output(intermediate_output, attention_output)
457
+ outputs = (layer_output,) + outputs[1:] # add attentions if we output them
458
+ return outputs
459
+
460
+
461
+ class LxmertXLayer(nn.Module):
462
+ def __init__(self, config):
463
+ super().__init__()
464
+ # The cross-attention Layer
465
+ self.visual_attention = LxmertCrossAttentionLayer(config)
466
+
467
+ # Self-attention Layers
468
+ self.lang_self_att = LxmertSelfAttentionLayer(config)
469
+ self.visn_self_att = LxmertSelfAttentionLayer(config)
470
+
471
+ # Intermediate and Output Layers (FFNs)
472
+ self.lang_inter = LxmertIntermediate(config)
473
+ self.lang_output = LxmertOutput(config)
474
+ self.visn_inter = LxmertIntermediate(config)
475
+ self.visn_output = LxmertOutput(config)
476
+
477
+ def cross_att(
478
+ self,
479
+ lang_input,
480
+ lang_attention_mask,
481
+ visual_input,
482
+ visual_attention_mask,
483
+ output_x_attentions=False,
484
+ ):
485
+ # Cross Attention
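+ # Note: the same cross-attention module (self.visual_attention) is applied in both directions below, so its weights are shared between the language->vision and vision->language passes.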
486
+ lang_att_output = self.visual_attention(
487
+ lang_input,
488
+ visual_input,
489
+ ctx_att_mask=visual_attention_mask,
490
+ output_attentions=output_x_attentions,
491
+ )
492
+ visual_att_output = self.visual_attention(
493
+ visual_input,
494
+ lang_input,
495
+ ctx_att_mask=lang_attention_mask,
496
+ output_attentions=False,
497
+ )
498
+ return lang_att_output, visual_att_output
499
+
500
+ def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
501
+ # Self Attention
502
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
503
+ visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
504
+ return lang_att_output[0], visual_att_output[0]
505
+
506
+ def output_fc(self, lang_input, visual_input):
507
+ # FC layers
508
+ lang_inter_output = self.lang_inter(lang_input)
509
+ visual_inter_output = self.visn_inter(visual_input)
510
+
511
+ # Layer output
512
+ lang_output = self.lang_output(lang_inter_output, lang_input)
513
+ visual_output = self.visn_output(visual_inter_output, visual_input)
514
+
515
+ return lang_output, visual_output
516
+
517
+ def forward(
518
+ self,
519
+ lang_feats,
520
+ lang_attention_mask,
521
+ visual_feats,
522
+ visual_attention_mask,
523
+ output_attentions=False,
524
+ ):
525
+ lang_att_output, visual_att_output = self.cross_att(
526
+ lang_input=lang_feats,
527
+ lang_attention_mask=lang_attention_mask,
528
+ visual_input=visual_feats,
529
+ visual_attention_mask=visual_attention_mask,
530
+ output_x_attentions=output_attentions,
531
+ )
532
+ attention_probs = lang_att_output[1:]
533
+ lang_att_output, visual_att_output = self.self_att(
534
+ lang_att_output[0],
535
+ lang_attention_mask,
536
+ visual_att_output[0],
537
+ visual_attention_mask,
538
+ )
539
+
540
+ lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
541
+ return (
542
+ (
543
+ lang_output,
544
+ visual_output,
545
+ attention_probs[0],
546
+ )
547
+ if output_attentions
548
+ else (lang_output, visual_output)
549
+ )
550
+
551
+
552
+ class LxmertVisualFeatureEncoder(nn.Module):
553
+ def __init__(self, config):
554
+ super().__init__()
555
+ feat_dim = config.visual_feat_dim
556
+ pos_dim = config.visual_pos_dim
557
+
558
+ # Object feature encoding
559
+ self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
560
+ self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
561
+
562
+ # Box position encoding
563
+ self.box_fc = nn.Linear(pos_dim, config.hidden_size)
564
+ self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
565
+
566
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
567
+
568
+ def forward(self, visual_feats, visual_pos):
569
+ x = self.visn_fc(visual_feats)
570
+ x = self.visn_layer_norm(x)
571
+ y = self.box_fc(visual_pos)
572
+ y = self.box_layer_norm(y)
573
+ output = (x + y) / 2
574
+
575
+ output = self.dropout(output)
576
+ return output
577
+
578
+
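Another illustrative aside (assumptions limited to the code above): the visual front-end projects the 2048-d region features and their 4-d box coordinates to `hidden_size` separately, averages the two projections, and applies dropout.

```python
import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertVisualFeatureEncoder

config = LxmertConfig()
visn_fc = LxmertVisualFeatureEncoder(config)

feats = torch.randn(2, 36, config.visual_feat_dim)  # ROI features, (batch, regions, 2048)
boxes = torch.rand(2, 36, config.visual_pos_dim)    # normalized boxes, (batch, regions, 4)

print(visn_fc(feats, boxes).shape)                  # torch.Size([2, 36, 768])
```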
579
+ class LxmertEncoder(nn.Module):
580
+ def __init__(self, config):
581
+ super().__init__()
582
+
583
+ # Obj-level image embedding layer
584
+ self.visn_fc = LxmertVisualFeatureEncoder(config)
585
+ self.config = config
586
+
587
+ # Number of layers
588
+ self.num_l_layers = config.l_layers
589
+ self.num_x_layers = config.x_layers
590
+ self.num_r_layers = config.r_layers
591
+
592
+ # Layers
593
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
594
+ self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
595
+ self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
596
+ self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
597
+
598
+ def forward(
599
+ self,
600
+ lang_feats,
601
+ lang_attention_mask,
602
+ visual_feats,
603
+ visual_pos,
604
+ visual_attention_mask=None,
605
+ output_attentions=None,
606
+ ):
607
+ vision_hidden_states = ()
608
+ language_hidden_states = ()
609
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
610
+ language_attentions = () if output_attentions or self.config.output_attentions else None
611
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
612
+
613
+ visual_feats = self.visn_fc(visual_feats, visual_pos)
614
+
615
+ # Run language layers
616
+ for layer_module in self.layer:
617
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
618
+ lang_feats = l_outputs[0]
619
+ language_hidden_states = language_hidden_states + (lang_feats,)
620
+ if language_attentions is not None:
621
+ language_attentions = language_attentions + (l_outputs[1],)
622
+
623
+ # Run relational layers
624
+ for layer_module in self.r_layers:
625
+ v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
626
+ visual_feats = v_outputs[0]
627
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
628
+ if vision_attentions is not None:
629
+ vision_attentions = vision_attentions + (v_outputs[1],)
630
+
631
+ # Run cross-modality layers
632
+ for layer_module in self.x_layers:
633
+ x_outputs = layer_module(
634
+ lang_feats,
635
+ lang_attention_mask,
636
+ visual_feats,
637
+ visual_attention_mask,
638
+ output_attentions=output_attentions,
639
+ )
640
+ lang_feats, visual_feats = x_outputs[:2]
641
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
642
+ language_hidden_states = language_hidden_states + (lang_feats,)
643
+ if cross_encoder_attentions is not None:
644
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
645
+ visual_encoder_outputs = (
646
+ vision_hidden_states,
647
+ vision_attentions if output_attentions else None,
648
+ )
649
+ lang_encoder_outputs = (
650
+ language_hidden_states,
651
+ language_attentions if output_attentions else None,
652
+ )
653
+ return (
654
+ visual_encoder_outputs,
655
+ lang_encoder_outputs,
656
+ cross_encoder_attentions if output_attentions else None,
657
+ )
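+
+ # Illustrative note (not part of the original file): the encoder returns a nested tuple
+ #   ((vision_hidden_states, vision_attentions), (language_hidden_states, language_attentions), cross_attentions)
+ # where each *_hidden_states entry is itself a tuple with one tensor per layer, so the
+ # final representations are vision_hidden_states[-1] and language_hidden_states[-1].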
658
+
659
+
660
+ class LxmertPooler(nn.Module):
661
+ def __init__(self, config):
662
+ super(LxmertPooler, self).__init__()
663
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
664
+ self.activation = nn.Tanh()
665
+
666
+ def forward(self, hidden_states):
667
+ # We "pool" the model by simply taking the hidden state corresponding
668
+ # to the first token.
669
+ first_token_tensor = hidden_states[:, 0]
670
+ pooled_output = self.dense(first_token_tensor)
671
+ pooled_output = self.activation(pooled_output)
672
+ return pooled_output
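+
+ # Illustrative note (not part of the original file): for hidden_states of shape
+ # (batch_size, seq_len, hidden_size) the pooler keeps only position 0 (the [CLS] token)
+ # and maps it through Linear + Tanh, i.e. (B, L, H) -> (B, H).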
673
+
674
+
675
+ class LxmertPredictionHeadTransform(nn.Module):
676
+ def __init__(self, config):
677
+ super(LxmertPredictionHeadTransform, self).__init__()
678
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
679
+ self.transform_act_fn = ACT2FN[config.hidden_act]
680
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
681
+
682
+ def forward(self, hidden_states):
683
+ hidden_states = self.dense(hidden_states)
684
+ hidden_states = self.transform_act_fn(hidden_states)
685
+ hidden_states = self.LayerNorm(hidden_states)
686
+ return hidden_states
687
+
688
+
689
+ class LxmertLMPredictionHead(nn.Module):
690
+ def __init__(self, config, lxmert_model_embedding_weights):
691
+ super(LxmertLMPredictionHead, self).__init__()
692
+ self.transform = LxmertPredictionHeadTransform(config)
693
+
694
+ # The output weights are the same as the input embeddings, but there is
695
+ # an output-only bias for each token.
696
+ self.decoder = nn.Linear(
697
+ lxmert_model_embedding_weights.size(1),
698
+ lxmert_model_embedding_weights.size(0),
699
+ bias=False,
700
+ )
701
+ self.decoder.weight = lxmert_model_embedding_weights
702
+ self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
703
+
704
+ def forward(self, hidden_states):
705
+ hidden_states = self.transform(hidden_states)
706
+ hidden_states = self.decoder(hidden_states) + self.bias
707
+ return hidden_states
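+
+ # Illustrative note (not part of the original file): `decoder.weight` is tied to the word
+ # embedding matrix E of shape (vocab_size, hidden_size), so this head computes
+ #   logits = transform(hidden_states) @ E.T + bias  # (B, L, H) -> (B, L, vocab_size)
+ # and only the per-token output bias is a new parameter.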
708
+
709
+
710
+ class LxmertVisualAnswerHead(nn.Module):
711
+ def __init__(self, config, num_labels):
712
+ super().__init__()
713
+ hid_dim = config.hidden_size
714
+ self.logit_fc = nn.Sequential(
715
+ nn.Linear(hid_dim, hid_dim * 2),
716
+ GeLU(),
717
+ nn.LayerNorm(hid_dim * 2, eps=1e-12),
718
+ nn.Linear(hid_dim * 2, num_labels),
719
+ )
720
+
721
+ def forward(self, hidden_states):
722
+ return self.logit_fc(hidden_states)
723
+
724
+
725
+ class LxmertVisualObjHead(nn.Module):
726
+ def __init__(self, config):
727
+ super().__init__()
728
+ self.transform = LxmertPredictionHeadTransform(config)
729
+ # Decide the use of visual losses
730
+ visual_losses = {}
731
+ if config.visual_obj_loss:
732
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
733
+ if config.visual_attr_loss:
734
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
735
+ if config.visual_feat_loss:
736
+ visual_losses["feat"] = {
737
+ "shape": (-1, config.visual_feat_dim),
738
+ "num": config.visual_feat_dim,
739
+ }
740
+ self.visual_losses = visual_losses
741
+
742
+ # The output weights are the same as the input embeddings, but there is
743
+ # an output-only bias for each token.
744
+ self.decoder_dict = nn.ModuleDict(
745
+ {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
746
+ )
747
+
748
+ def forward(self, hidden_states):
749
+ hidden_states = self.transform(hidden_states)
750
+ output = {}
751
+ for key in self.visual_losses:
752
+ output[key] = self.decoder_dict[key](hidden_states)
753
+ return output
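+
+ # Illustrative note (not part of the original file): with all three visual losses enabled,
+ # the returned dict maps "obj" -> (B, N, num_object_labels), "attr" -> (B, N, num_attr_labels)
+ # and "feat" -> (B, N, visual_feat_dim) predictions for the N visual features.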
754
+
755
+
756
+ class LxmertPreTrainingHeads(nn.Module):
757
+ def __init__(self, config, lxmert_model_embedding_weights):
758
+ super(LxmertPreTrainingHeads, self).__init__()
759
+ self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
760
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
761
+
762
+ def forward(self, sequence_output, pooled_output):
763
+ prediction_scores = self.predictions(sequence_output)
764
+ seq_relationship_score = self.seq_relationship(pooled_output)
765
+ return prediction_scores, seq_relationship_score
766
+
767
+
768
+ class LxmertPreTrainedModel(PreTrainedModel):
769
+ """
770
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
771
+ models.
772
+ """
773
+
774
+ config_class = LxmertConfig
775
+ load_tf_weights = load_tf_weights_in_lxmert
776
+ base_model_prefix = "lxmert"
777
+
778
+ def _init_weights(self, module):
779
+ """Initialize the weights"""
780
+ if isinstance(module, nn.Linear):
781
+ # Slightly different from the TF version which uses truncated_normal for initialization
782
+ # cf https://github.com/pytorch/pytorch/pull/5617
783
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
784
+ if module.bias is not None:
785
+ module.bias.data.zero_()
786
+ elif isinstance(module, nn.Embedding):
787
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
788
+ if module.padding_idx is not None:
789
+ module.weight.data[module.padding_idx].zero_()
790
+ elif isinstance(module, nn.LayerNorm):
791
+ module.bias.data.zero_()
792
+ module.weight.data.fill_(1.0)
793
+
794
+
795
+ LXMERT_START_DOCSTRING = r"""
796
+
797
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
798
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
799
+ model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
800
+ Genome, using a combination of masked language modeling, region-of-interest feature regression, cross-entropy loss
801
+ for question answering, attribute prediction, and object tag prediction.
802
+
803
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
804
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
805
+ etc.)
806
+
807
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
808
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
809
+ and behavior.
810
+
811
+ Parameters:
812
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
813
+ Initializing with a config file does not load the weights associated with the model, only the
814
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
815
+ """
816
+
817
+ LXMERT_INPUTS_DOCSTRING = r"""
818
+
819
+ Args:
820
+ input_ids (`torch.LongTensor` of shape `({0})`):
821
+ Indices of input sequence tokens in the vocabulary.
822
+
823
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
824
+ [`PreTrainedTokenizer.__call__`] for details.
825
+
826
+ [What are input IDs?](../glossary#input-ids)
827
+ visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
828
+ This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
829
+ Faster R-CNN model.
830
+
831
+ These are currently not provided by the transformers library.
832
+ visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
833
+ This input represents spatial features corresponding (by index) to their respective visual features. The
834
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
835
+ 1.
836
+
837
+ These are currently not provided by the transformers library.
838
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
839
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
840
+
841
+ - 1 for tokens that are **not masked**,
842
+ - 0 for tokens that are **masked**.
843
+
844
+ [What are attention masks?](../glossary#attention-mask)
845
+ visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
846
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
847
+
848
+ - 1 for tokens that are **not masked**,
849
+ - 0 for tokens that are **masked**.
850
+
851
+ [What are attention masks?](../glossary#attention-mask)
852
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
853
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
854
+ 1]`:
855
+
856
+ - 0 corresponds to a *sentence A* token,
857
+ - 1 corresponds to a *sentence B* token.
858
+
859
+ [What are token type IDs?](../glossary#token-type-ids)
860
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
861
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
862
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
863
+ model's internal embedding lookup matrix.
864
+ output_attentions (`bool`, *optional*):
865
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
866
+ tensors for more detail.
867
+ output_hidden_states (`bool`, *optional*):
868
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
869
+ more detail.
870
+ return_dict (`bool`, *optional*):
871
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
872
+ """
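+
+ # Minimal usage sketch (illustrative, not part of the original file). Visual features are
+ # expected to come from an external Faster R-CNN backbone; random tensors with the default
+ # shapes stand in for them here:
+ #   from transformers import AutoTokenizer, LxmertModel
+ #   tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #   model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #   inputs = tokenizer("Who is eating the apple?", return_tensors="pt")
+ #   visual_feats = torch.randn(1, 36, 2048)  # (batch_size, num_visual_features, visual_feat_dim)
+ #   visual_pos = torch.rand(1, 36, 4)        # normalized boxes, (batch_size, num_visual_features, visual_pos_dim)
+ #   outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)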
873
+
874
+
875
+ @add_start_docstrings(
876
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
877
+ LXMERT_START_DOCSTRING,
878
+ )
879
+ class LxmertModel(LxmertPreTrainedModel):
880
+ def __init__(self, config):
881
+ super().__init__(config)
882
+ self.embeddings = LxmertEmbeddings(config)
883
+ self.encoder = LxmertEncoder(config)
884
+ self.pooler = LxmertPooler(config)
885
+ # Initialize weights and apply final processing
886
+ self.post_init()
887
+
888
+ def get_input_embeddings(self):
889
+ return self.embeddings.word_embeddings
890
+
891
+ def set_input_embeddings(self, new_embeddings):
892
+ self.embeddings.word_embeddings = new_embeddings
893
+
894
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
895
+ @add_code_sample_docstrings(
896
+ checkpoint=_CHECKPOINT_FOR_DOC,
897
+ output_type=LxmertModelOutput,
898
+ config_class=_CONFIG_FOR_DOC,
899
+ )
900
+ def forward(
901
+ self,
902
+ input_ids: Optional[torch.LongTensor] = None,
903
+ visual_feats: Optional[torch.FloatTensor] = None,
904
+ visual_pos: Optional[torch.FloatTensor] = None,
905
+ attention_mask: Optional[torch.FloatTensor] = None,
906
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
907
+ token_type_ids: Optional[torch.LongTensor] = None,
908
+ inputs_embeds: Optional[torch.FloatTensor] = None,
909
+ output_attentions: Optional[bool] = None,
910
+ output_hidden_states: Optional[bool] = None,
911
+ return_dict: Optional[bool] = None,
912
+ ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]:
913
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
914
+ output_hidden_states = (
915
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
916
+ )
917
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
918
+
919
+ if input_ids is not None and inputs_embeds is not None:
920
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
921
+ elif input_ids is not None:
922
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
923
+ input_shape = input_ids.size()
924
+ elif inputs_embeds is not None:
925
+ input_shape = inputs_embeds.size()[:-1]
926
+ else:
927
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
928
+
929
+ if visual_feats is None:
930
+ raise ValueError("`visual_feats` cannot be `None`")
931
+ if visual_pos is None:
932
+ raise ValueError("`visual_pos` cannot be `None`")
933
+
934
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
935
+
936
+ if attention_mask is None:
937
+ attention_mask = torch.ones(input_shape, device=device)
938
+ if token_type_ids is None:
939
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
940
+
941
+ # We create a 3D attention mask from a 2D tensor mask.
942
+ # Sizes are [batch_size, 1, 1, to_seq_length]
943
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
944
+ # this attention mask is more simple than the triangular masking of causal attention
945
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
946
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
947
+
948
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
949
+ # masked positions, this operation will create a tensor which is 0.0 for
950
+ # positions we want to attend and the dtype's smallest value for masked positions.
951
+ # Since we are adding it to the raw scores before the softmax, this is
952
+ # effectively the same as removing these entirely.
953
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
954
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
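+
+ # Illustrative example (not part of the original file): a padding mask [[1, 1, 0]] becomes
+ # [[[[0.0, 0.0, torch.finfo(dtype).min]]]], i.e. 0.0 for attended positions and a very
+ # large negative bias for padded positions, added to the raw scores before the softmax.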
955
+
956
+ # Process the visual attention mask
957
+ if visual_attention_mask is not None:
958
+ extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
959
+ extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
960
+ extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min
961
+ else:
962
+ extended_visual_attention_mask = None
963
+
964
+ # Positional Word Embeddings
965
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
966
+
967
+ # Run Lxmert encoder
968
+ encoder_outputs = self.encoder(
969
+ embedding_output,
970
+ extended_attention_mask,
971
+ visual_feats=visual_feats,
972
+ visual_pos=visual_pos,
973
+ visual_attention_mask=extended_visual_attention_mask,
974
+ output_attentions=output_attentions,
975
+ )
976
+
977
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
978
+ vision_hidden_states = visual_encoder_outputs[0]
979
+ language_hidden_states = lang_encoder_outputs[0]
980
+
981
+ all_attentions = ()
982
+ if output_attentions:
983
+ language_attentions = lang_encoder_outputs[1]
984
+ vision_attentions = visual_encoder_outputs[1]
985
+ cross_encoder_attentions = encoder_outputs[2]
986
+ all_attentions = (
987
+ language_attentions,
988
+ vision_attentions,
989
+ cross_encoder_attentions,
990
+ )
991
+
992
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
993
+
994
+ visual_output = vision_hidden_states[-1]
995
+ lang_output = language_hidden_states[-1]
996
+ pooled_output = self.pooler(lang_output)
997
+
998
+ if not return_dict:
999
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
1000
+
1001
+ return LxmertModelOutput(
1002
+ pooled_output=pooled_output,
1003
+ language_output=lang_output,
1004
+ vision_output=visual_output,
1005
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
1006
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
1007
+ language_attentions=language_attentions if output_attentions else None,
1008
+ vision_attentions=vision_attentions if output_attentions else None,
1009
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
1010
+ )
1011
+
1012
+
1013
+ @add_start_docstrings(
1014
+ """Lxmert Model with a specified pretraining head on top.""",
1015
+ LXMERT_START_DOCSTRING,
1016
+ )
1017
+ class LxmertForPreTraining(LxmertPreTrainedModel):
1018
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
1019
+
1020
+ def __init__(self, config):
1021
+ super().__init__(config)
1022
+ # Configuration
1023
+ self.config = config
1024
+ self.num_qa_labels = config.num_qa_labels
1025
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1026
+
1027
+ # Use of pretraining tasks
1028
+ self.task_mask_lm = config.task_mask_lm
1029
+ self.task_obj_predict = config.task_obj_predict
1030
+ self.task_matched = config.task_matched
1031
+ self.task_qa = config.task_qa
1032
+
1033
+ # Lxmert backbone
1034
+ self.lxmert = LxmertModel(config)
1035
+
1036
+ # Pre-training heads
1037
+ self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
1038
+ if self.task_obj_predict:
1039
+ self.obj_predict_head = LxmertVisualObjHead(config)
1040
+ if self.task_qa:
1041
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1042
+
1043
+ # Weight initialization
1044
+ # Initialize weights and apply final processing
1045
+ self.post_init()
1046
+
1047
+ # Loss functions
1048
+ self.loss_fcts = {
1049
+ "l2": SmoothL1Loss(reduction="none"),
1050
+ "visual_ce": CrossEntropyLoss(reduction="none"),
1051
+ "ce": CrossEntropyLoss(),
1052
+ }
1053
+
1054
+ visual_losses = {}
1055
+ if config.visual_obj_loss:
1056
+ visual_losses["obj"] = {
1057
+ "shape": (-1,),
1058
+ "num": config.num_object_labels,
1059
+ "loss": "visual_ce",
1060
+ }
1061
+ if config.visual_attr_loss:
1062
+ visual_losses["attr"] = {
1063
+ "shape": (-1,),
1064
+ "num": config.num_attr_labels,
1065
+ "loss": "visual_ce",
1066
+ }
1067
+ if config.visual_feat_loss:
1068
+ visual_losses["feat"] = {
1069
+ "shape": (-1, config.visual_feat_dim),
1070
+ "num": config.visual_feat_dim,
1071
+ "loss": "l2",
1072
+ }
1073
+ self.visual_losses = visual_losses
1074
+
1075
+ def resize_num_qa_labels(self, num_labels):
1076
+ """
1077
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
1078
+ will add newly initialized weights. Reducing the size will remove weights from the end.
1079
+
1080
+ Args:
1081
+ num_labels (`int`, *optional*):
1082
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1083
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1084
+ returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
1085
+
1086
+ Return:
1087
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1088
+ """
1089
+
1090
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1091
+ if num_labels is None or cur_qa_logit_layer is None:
1092
+ return
1093
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1094
+ self.config.num_qa_labels = num_labels
1095
+ self.num_qa_labels = num_labels
1096
+
1097
+ return new_qa_logit_layer
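+
+ # Usage sketch (illustrative, not part of the original file): adapting the answer head to a
+ # new answer vocabulary, e.g. a downstream VQA dataset with 3129 candidate answers:
+ #   model = LxmertForPreTraining.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #   model.resize_num_qa_labels(3129)  # returns the resized nn.Linear layer
+ #   assert model.config.num_qa_labels == 3129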
1098
+
1099
+ def _resize_qa_labels(self, num_labels):
1100
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1101
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1102
+ self._set_qa_logit_layer(new_qa_logit_layer)
1103
+ return self.get_qa_logit_layer()
1104
+
1105
+ def get_qa_logit_layer(self) -> nn.Module:
1106
+ """
1107
+ Returns the linear layer that produces question answering logits.
1108
+
1109
+ Returns:
1110
+ `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
1111
+ does not have a visual answering head.
1112
+ """
1113
+ if hasattr(self, "answer_head"):
1114
+ return self.answer_head.logit_fc[-1]
1115
+
1116
+ def _set_qa_logit_layer(self, qa_logit_layer):
1117
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1118
+
1119
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1120
+ if num_labels is None:
1121
+ return cur_qa_logit_layer
1122
+
1123
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1124
+ if cur_qa_labels == num_labels:
1125
+ return cur_qa_logit_layer
1126
+
1127
+ # Build new linear output
1128
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1129
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1130
+ else:
1131
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1132
+
1133
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1134
+
1135
+ # initialize all new labels
1136
+ self._init_weights(new_qa_logit_layer)
1137
+
1138
+ # Copy labels from the previous weights
1139
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1140
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1141
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1142
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1143
+
1144
+ return new_qa_logit_layer
1145
+
1146
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1147
+ @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1148
+ def forward(
1149
+ self,
1150
+ input_ids: Optional[torch.LongTensor] = None,
1151
+ visual_feats: Optional[torch.FloatTensor] = None,
1152
+ visual_pos: Optional[torch.FloatTensor] = None,
1153
+ attention_mask: Optional[torch.FloatTensor] = None,
1154
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1155
+ token_type_ids: Optional[torch.LongTensor] = None,
1156
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1157
+ labels: Optional[torch.LongTensor] = None,
1158
+ obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None,
1159
+ matched_label: Optional[torch.LongTensor] = None,
1160
+ ans: Optional[torch.Tensor] = None,
1161
+ output_attentions: Optional[bool] = None,
1162
+ output_hidden_states: Optional[bool] = None,
1163
+ return_dict: Optional[bool] = None,
1164
+ **kwargs,
1165
+ ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]:
1166
+ r"""
1167
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1168
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1169
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1170
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1171
+ obj_labels (`Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
1172
+ each key is named after one of the visual losses, and the two elements of the tuple have shapes
1173
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)`, holding the label ids and
1174
+ the label scores respectively
1175
+ matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1176
+ Labels for computing whether or not the text input matches the image (classification) loss. Input
1177
+ should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
1178
+
1179
+ - 0 indicates that the sentence does not match the image,
1180
+ - 1 indicates that the sentence does match the image.
1181
+ ans (`torch.Tensor` of shape `(batch_size,)`, *optional*):
1182
+ a one-hot representation of the correct answer
1183
+
1184
+ Returns:
1185
+ """
1186
+
1187
+ if "masked_lm_labels" in kwargs:
1188
+ warnings.warn(
1189
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
1190
+ " instead.",
1191
+ FutureWarning,
1192
+ )
1193
+ labels = kwargs.pop("masked_lm_labels")
1194
+
1195
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1196
+
1197
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1198
+ lxmert_output = self.lxmert(
1199
+ input_ids=input_ids,
1200
+ visual_feats=visual_feats,
1201
+ visual_pos=visual_pos,
1202
+ token_type_ids=token_type_ids,
1203
+ attention_mask=attention_mask,
1204
+ visual_attention_mask=visual_attention_mask,
1205
+ inputs_embeds=inputs_embeds,
1206
+ output_hidden_states=output_hidden_states,
1207
+ output_attentions=output_attentions,
1208
+ return_dict=return_dict,
1209
+ )
1210
+
1211
+ lang_output, visual_output, pooled_output = (
1212
+ lxmert_output[0],
1213
+ lxmert_output[1],
1214
+ lxmert_output[2],
1215
+ )
1216
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1217
+ if self.task_qa:
1218
+ answer_score = self.answer_head(pooled_output)
1219
+ else:
1220
+ answer_score = pooled_output[0][0]
1221
+
1222
+ total_loss = (
1223
+ None
1224
+ if (labels is None and matched_label is None and obj_labels is None and ans is None)
1225
+ else torch.tensor(0.0, device=device)
1226
+ )
1227
+ if labels is not None and self.task_mask_lm:
1228
+ masked_lm_loss = self.loss_fcts["ce"](
1229
+ lang_prediction_scores.view(-1, self.config.vocab_size),
1230
+ labels.view(-1),
1231
+ )
1232
+ total_loss += masked_lm_loss
1233
+ if matched_label is not None and self.task_matched:
1234
+ matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
1235
+ total_loss += matched_loss
1236
+ if obj_labels is not None and self.task_obj_predict:
1237
+ total_visual_loss = torch.tensor(0.0, device=device)  # use the already-resolved device so inputs_embeds-only calls also work
1238
+ visual_prediction_scores_dict = self.obj_predict_head(visual_output)
1239
+ for key, key_info in self.visual_losses.items():
1240
+ label, mask_conf = obj_labels[key]
1241
+ output_dim = key_info["num"]
1242
+ loss_fct_name = key_info["loss"]
1243
+ label_shape = key_info["shape"]
1244
+ weight = self.visual_loss_normalizer
1245
+ visual_loss_fct = self.loss_fcts[loss_fct_name]
1246
+ visual_prediction_scores = visual_prediction_scores_dict[key]
1247
+ visual_loss = visual_loss_fct(
1248
+ visual_prediction_scores.view(-1, output_dim),
1249
+ label.view(label_shape),
1250
+ )
1251
+ if visual_loss.dim() > 1: # Regression Losses
1252
+ visual_loss = visual_loss.mean(1)
1253
+ visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
1254
+ total_visual_loss += visual_loss
1255
+ total_loss += total_visual_loss
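+ # Illustrative note (not part of the original file): each visual loss above is computed per
+ # feature, weighted by its confidence mask (`mask_conf`), averaged, and scaled by
+ # `visual_loss_normalizer` before being added to the total loss.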
1256
+ if ans is not None and self.task_qa:
1257
+ answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
1258
+ total_loss += answer_loss
1259
+
1260
+ if not return_dict:
1261
+ output = (
1262
+ lang_prediction_scores,
1263
+ cross_relationship_score,
1264
+ answer_score,
1265
+ ) + lxmert_output[3:]
1266
+ return ((total_loss,) + output) if total_loss is not None else output
1267
+
1268
+ return LxmertForPreTrainingOutput(
1269
+ loss=total_loss,
1270
+ prediction_logits=lang_prediction_scores,
1271
+ cross_relationship_score=cross_relationship_score,
1272
+ question_answering_score=answer_score,
1273
+ language_hidden_states=lxmert_output.language_hidden_states,
1274
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1275
+ language_attentions=lxmert_output.language_attentions,
1276
+ vision_attentions=lxmert_output.vision_attentions,
1277
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1278
+ )
1279
+
1280
+
1281
+ @add_start_docstrings(
1282
+ """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
1283
+ LXMERT_START_DOCSTRING,
1284
+ )
1285
+ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
1286
+ def __init__(self, config):
1287
+ super().__init__(config)
1288
+ # Configuration
1289
+ self.config = config
1290
+ self.num_qa_labels = config.num_qa_labels
1291
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1292
+
1293
+ # Lxmert backbone
1294
+ self.lxmert = LxmertModel(config)
1295
+
1296
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1297
+
1298
+ # Weight initialization
1299
+ # Initialize weights and apply final processing
1300
+ self.post_init()
1301
+
1302
+ # Loss function
1303
+ self.loss = CrossEntropyLoss()
1304
+
1305
+ def resize_num_qa_labels(self, num_labels):
1306
+ """
1307
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
1308
+ will add newly initialized weights. Reducing the size will remove weights from the end.
1309
+
1310
+ Args:
1311
+ num_labels (`int`, *optional*):
1312
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1313
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1314
+ returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
1315
+
1316
+ Return:
1317
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1318
+ """
1319
+
1320
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1321
+ if num_labels is None or cur_qa_logit_layer is None:
1322
+ return
1323
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1324
+ self.config.num_qa_labels = num_labels
1325
+ self.num_qa_labels = num_labels
1326
+
1327
+ return new_qa_logit_layer
1328
+
1329
+ def _resize_qa_labels(self, num_labels):
1330
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1331
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1332
+ self._set_qa_logit_layer(new_qa_logit_layer)
1333
+ return self.get_qa_logit_layer()
1334
+
1335
+ def get_qa_logit_layer(self) -> nn.Module:
1336
+ """
1337
+ Returns the linear layer that produces question answering logits
1338
+
1339
+ Returns:
1340
+ `nn.Module`: A torch module mapping the question answering prediction hidden states, or `None` if Lxmert
1341
+ does not have a visual answering head.
1342
+ """
1343
+
1344
+ if hasattr(self, "answer_head"):
1345
+ return self.answer_head.logit_fc[-1]
1346
+
1347
+ def _set_qa_logit_layer(self, qa_logit_layer):
1348
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1349
+
1350
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1351
+ if num_labels is None:
1352
+ return cur_qa_logit_layer
1353
+
1354
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1355
+ if cur_qa_labels == num_labels:
1356
+ return cur_qa_logit_layer
1357
+
1358
+ # Build new linear output
1359
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1360
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1361
+ else:
1362
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1363
+
1364
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1365
+
1366
+ # initialize all new labels
1367
+ self._init_weights(new_qa_logit_layer)
1368
+
1369
+ # Copy labels from the previous weights
1370
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1371
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1372
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1373
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1374
+
1375
+ return new_qa_logit_layer
1376
+
1377
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1378
+ @add_code_sample_docstrings(
1379
+ checkpoint=_CHECKPOINT_FOR_DOC,
1380
+ output_type=LxmertForQuestionAnsweringOutput,
1381
+ config_class=_CONFIG_FOR_DOC,
1382
+ )
1383
+ def forward(
1384
+ self,
1385
+ input_ids: Optional[torch.LongTensor] = None,
1386
+ visual_feats: Optional[torch.FloatTensor] = None,
1387
+ visual_pos: Optional[torch.FloatTensor] = None,
1388
+ attention_mask: Optional[torch.FloatTensor] = None,
1389
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1390
+ token_type_ids: Optional[torch.LongTensor] = None,
1391
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1392
+ labels: Optional[torch.Tensor] = None,
1393
+ output_attentions: Optional[bool] = None,
1394
+ output_hidden_states: Optional[bool] = None,
1395
+ return_dict: Optional[bool] = None,
1396
+ ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]:
1397
+ r"""
1398
+ labels (`torch.Tensor` of shape `(batch_size,)`, *optional*):
1399
+ A one-hot representation of the correct answer
1400
+ """
1401
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1402
+
1403
+ lxmert_output = self.lxmert(
1404
+ input_ids=input_ids,
1405
+ visual_feats=visual_feats,
1406
+ visual_pos=visual_pos,
1407
+ token_type_ids=token_type_ids,
1408
+ attention_mask=attention_mask,
1409
+ visual_attention_mask=visual_attention_mask,
1410
+ inputs_embeds=inputs_embeds,
1411
+ output_hidden_states=output_hidden_states,
1412
+ output_attentions=output_attentions,
1413
+ return_dict=return_dict,
1414
+ )
1415
+
1416
+ pooled_output = lxmert_output[2]
1417
+ answer_score = self.answer_head(pooled_output)
1418
+ loss = None
1419
+ if labels is not None:
1420
+ loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
1421
+
1422
+ if not return_dict:
1423
+ output = (answer_score,) + lxmert_output[3:]
1424
+ return (loss,) + output if loss is not None else output
1425
+
1426
+ return LxmertForQuestionAnsweringOutput(
1427
+ loss=loss,
1428
+ question_answering_score=answer_score,
1429
+ language_hidden_states=lxmert_output.language_hidden_states,
1430
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1431
+ language_attentions=lxmert_output.language_attentions,
1432
+ vision_attentions=lxmert_output.vision_attentions,
1433
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1434
+ )
venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py ADDED
@@ -0,0 +1,1656 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
3
+ # Lxmert Authors.
4
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """ TF 2.0 LXMERT model."""
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from typing import Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import tensorflow as tf
28
+
29
+ from ...activations_tf import get_tf_activation
30
+ from ...modeling_tf_utils import (
31
+ TFModelInputType,
32
+ TFPreTrainedModel,
33
+ get_initializer,
34
+ keras,
35
+ keras_serializable,
36
+ shape_list,
37
+ unpack_inputs,
38
+ )
39
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_lxmert import LxmertConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
54
+ _CONFIG_FOR_DOC = "LxmertConfig"
55
+
56
+
57
+ from ..deprecated._archive_maps import TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ @dataclass
61
+ class TFLxmertModelOutput(ModelOutput):
62
+ """
63
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
64
+ visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
65
+ encoder")
66
+
67
+
68
+ Args:
69
+ language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
70
+ Sequence of hidden-states at the output of the last layer of the language encoder.
71
+ vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
72
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
73
+ pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
74
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
75
+ by a Linear layer and a Tanh activation function. The Linear
76
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
77
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
78
+ `(batch_size, sequence_length, hidden_size)`.
79
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
80
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
81
+ `(batch_size, sequence_length, hidden_size)`.
82
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
83
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
84
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
85
+ the self-attention heads.
86
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
87
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
88
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
89
+ the self-attention heads.
90
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
91
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
92
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
93
+ the self-attention heads.
94
+ """
95
+
96
+ language_output: tf.Tensor | None = None
97
+ vision_output: tf.Tensor | None = None
98
+ pooled_output: tf.Tensor | None = None
99
+ language_hidden_states: Tuple[tf.Tensor] | None = None
100
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
101
+ language_attentions: Tuple[tf.Tensor] | None = None
102
+ vision_attentions: Tuple[tf.Tensor] | None = None
103
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
104
+
105
+
106
+ @dataclass
107
+ class TFLxmertForPreTrainingOutput(ModelOutput):
108
+ """
109
+ Output type of [`LxmertForPreTraining`].
110
+
111
+ Args:
112
+ loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
113
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
114
+ (classification) loss.
115
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
116
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
117
+ cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`):
118
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
119
+ continuation before SoftMax).
120
+ question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`):
121
+ Prediction scores of question answering objective (classification).
122
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
123
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
124
+ `(batch_size, sequence_length, hidden_size)`.
125
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
126
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
127
+ `(batch_size, sequence_length, hidden_size)`.
128
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
129
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
130
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
131
+ the self-attention heads.
132
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
133
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
134
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
135
+ the self-attention heads.
136
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
137
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
138
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
139
+ the self-attention heads.
140
+
141
+ """
142
+
143
+ loss: tf.Tensor | None = None
144
+ prediction_logits: tf.Tensor | None = None
145
+ cross_relationship_score: tf.Tensor | None = None
146
+ question_answering_score: tf.Tensor | None = None
147
+ language_hidden_states: Tuple[tf.Tensor] | None = None
148
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
149
+ language_attentions: Tuple[tf.Tensor] | None = None
150
+ vision_attentions: Tuple[tf.Tensor] | None = None
151
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
152
+
153
+
154
+ class TFLxmertVisualFeatureEncoder(keras.layers.Layer):
155
+ def __init__(self, config, **kwargs):
156
+ super().__init__(**kwargs)
157
+
158
+ # Object feature encoding
159
+ self.visn_fc = keras.layers.Dense(
160
+ config.hidden_size,
161
+ kernel_initializer=get_initializer(config.initializer_range),
162
+ name="visn_fc",
163
+ )
164
+ self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm")
165
+
166
+ # Box position encoding
167
+ self.box_fc = keras.layers.Dense(
168
+ config.hidden_size,
169
+ kernel_initializer=get_initializer(config.initializer_range),
170
+ name="box_fc",
171
+ )
172
+ self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm")
173
+
174
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
175
+ self.feat_dim = config.visual_feat_dim
176
+ self.pos_dim = config.visual_pos_dim
177
+ self.config = config
178
+
179
+ def call(self, visn_input, training=False):
180
+ feats, boxes = visn_input
181
+
182
+ x = self.visn_fc(feats)
183
+ x = self.visn_layer_norm(x)
184
+ y = self.box_fc(boxes)
185
+ y = self.box_layer_norm(y)
186
+ output = (x + y) / 2
187
+
188
+ output = self.dropout(output, training=training)
189
+ return output
190
+
191
+ def build(self, input_shape=None):
192
+ if self.built:
193
+ return
194
+ self.built = True
195
+ if getattr(self, "visn_fc", None) is not None:
196
+ with tf.name_scope(self.visn_fc.name):
197
+ self.visn_fc.build([None, None, self.feat_dim])
198
+ if getattr(self, "visn_layer_norm", None) is not None:
199
+ with tf.name_scope(self.visn_layer_norm.name):
200
+ self.visn_layer_norm.build([None, None, self.config.hidden_size])
201
+ if getattr(self, "box_fc", None) is not None:
202
+ with tf.name_scope(self.box_fc.name):
203
+ self.box_fc.build([None, None, self.pos_dim])
204
+ if getattr(self, "box_layer_norm", None) is not None:
205
+ with tf.name_scope(self.box_layer_norm.name):
206
+ self.box_layer_norm.build([None, None, self.config.hidden_size])
207
+
208
+
209
+ class TFLxmertEmbeddings(keras.layers.Layer):
210
+ """Construct the embeddings from word, position and token_type embeddings."""
211
+
212
+ def __init__(self, config, **kwargs):
213
+ super().__init__(**kwargs)
214
+
215
+ self.config = config
216
+ self.hidden_size = config.hidden_size
217
+ self.max_position_embeddings = config.max_position_embeddings
218
+ self.initializer_range = config.initializer_range
219
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
220
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
221
+
222
+ def build(self, input_shape=None):
223
+ with tf.name_scope("word_embeddings"):
224
+ self.weight = self.add_weight(
225
+ name="weight",
226
+ shape=[self.config.vocab_size, self.hidden_size],
227
+ initializer=get_initializer(initializer_range=self.initializer_range),
228
+ )
229
+
230
+ with tf.name_scope("token_type_embeddings"):
231
+ self.token_type_embeddings = self.add_weight(
232
+ name="embeddings",
233
+ shape=[self.config.type_vocab_size, self.hidden_size],
234
+ initializer=get_initializer(initializer_range=self.initializer_range),
235
+ )
236
+
237
+ with tf.name_scope("position_embeddings"):
238
+ self.position_embeddings = self.add_weight(
239
+ name="embeddings",
240
+ shape=[self.max_position_embeddings, self.hidden_size],
241
+ initializer=get_initializer(initializer_range=self.initializer_range),
242
+ )
243
+
244
+ if self.built:
245
+ return
246
+ self.built = True
247
+ if getattr(self, "LayerNorm", None) is not None:
248
+ with tf.name_scope(self.LayerNorm.name):
249
+ self.LayerNorm.build([None, None, self.config.hidden_size])
250
+
251
+ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
252
+ """
253
+ Applies embedding based on inputs tensor.
254
+
255
+ Returns:
256
+ final_embeddings (`tf.Tensor`): output embedding tensor.
257
+ """
258
+ assert not (input_ids is None and inputs_embeds is None)
259
+
260
+ if input_ids is not None:
261
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
262
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
263
+
264
+ input_shape = shape_list(inputs_embeds)[:-1]
265
+
266
+ if token_type_ids is None:
267
+ token_type_ids = tf.fill(dims=input_shape, value=0)
268
+
269
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
270
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
271
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
272
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
273
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
274
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
275
+
276
+ return final_embeddings
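+
+ # Illustrative shape sketch (not part of the original file): for input_ids of shape
+ # (batch_size, seq_len), the word, position and token-type embeddings are each gathered to
+ # (batch_size, seq_len, hidden_size), summed, layer-normalized and passed through dropout.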
277
+
278
+
279
+ class TFLxmertAttention(keras.layers.Layer):
280
+ def __init__(self, config, **kwargs):
281
+ super().__init__(**kwargs)
282
+ if config.hidden_size % config.num_attention_heads != 0:
283
+ raise ValueError(
284
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
285
+ f"heads ({config.num_attention_heads}"
286
+ )
287
+
288
+ self.num_attention_heads = config.num_attention_heads
289
+ assert config.hidden_size % config.num_attention_heads == 0
290
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
291
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
292
+
293
+ self.query = keras.layers.Dense(
294
+ self.all_head_size,
295
+ kernel_initializer=get_initializer(config.initializer_range),
296
+ name="query",
297
+ )
298
+ self.key = keras.layers.Dense(
299
+ self.all_head_size,
300
+ kernel_initializer=get_initializer(config.initializer_range),
301
+ name="key",
302
+ )
303
+ self.value = keras.layers.Dense(
304
+ self.all_head_size,
305
+ kernel_initializer=get_initializer(config.initializer_range),
306
+ name="value",
307
+ )
308
+
309
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
310
+ self.ctx_dim = config.hidden_size
311
+ self.config = config
312
+
313
+ def transpose_for_scores(self, x, batch_size):
314
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
315
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
316
+ return tf.transpose(x, perm=[0, 2, 1, 3])
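+
+ # Illustrative example (not part of the original file), assuming the base config
+ # (hidden_size=768, num_attention_heads=12, attention_head_size=64):
+ #   (batch, seq, 768) --reshape--> (batch, seq, 12, 64) --transpose--> (batch, 12, seq, 64)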
317
+
318
+ def call(self, hidden_states, context, attention_mask, output_attentions, training=False):
319
+ batch_size = shape_list(hidden_states)[0]
320
+ mixed_query_layer = self.query(hidden_states)
321
+ mixed_key_layer = self.key(context)
322
+ mixed_value_layer = self.value(context)
323
+
324
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
325
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
326
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
327
+
328
+ # Take the dot product between "query" and "key" to get the raw attention scores.
329
+ attention_scores = tf.matmul(
330
+ query_layer, key_layer, transpose_b=True
331
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
332
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
333
+ attention_scores = attention_scores / tf.math.sqrt(dk)
334
+
335
+ if attention_mask is not None:
336
+ # Apply the attention mask (precomputed for all layers in the TFLxmertModel call() function)
337
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
338
+ attention_scores = attention_scores + attention_mask
339
+
340
+ # Normalize the attention scores to probabilities.
341
+ attention_probs = stable_softmax(attention_scores, axis=-1)
342
+
343
+ # This is actually dropping out entire tokens to attend to, which might
344
+ # seem a bit unusual, but is taken from the original Transformer paper.
345
+ attention_probs = self.dropout(attention_probs, training=training)
346
+ context_layer = tf.matmul(attention_probs, value_layer)
347
+
348
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
349
+ context_layer = tf.reshape(
350
+ context_layer, (batch_size, -1, self.all_head_size)
351
+ ) # (batch_size, seq_len_q, all_head_size)
352
+
353
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
354
+ return outputs
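+
+ # Illustrative note (not part of the original file): the returned context layer has shape
+ # (batch_size, seq_len_q, all_head_size); when `output_attentions` is True, the softmaxed
+ # attention probabilities of shape (batch_size, num_heads, seq_len_q, seq_len_k) are
+ # returned alongside it.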
355
+
356
+ def build(self, input_shape=None):
357
+ if self.built:
358
+ return
359
+ self.built = True
360
+ if getattr(self, "query", None) is not None:
361
+ with tf.name_scope(self.query.name):
362
+ self.query.build([None, None, self.config.hidden_size])
363
+ if getattr(self, "key", None) is not None:
364
+ with tf.name_scope(self.key.name):
365
+ self.key.build([None, None, self.ctx_dim])
366
+ if getattr(self, "value", None) is not None:
367
+ with tf.name_scope(self.value.name):
368
+ self.value.build([None, None, self.ctx_dim])
369
+
370
+
371
+ class TFLxmertIntermediate(keras.layers.Layer):
372
+ def __init__(self, config, **kwargs):
373
+ super().__init__(**kwargs)
374
+ self.dense = keras.layers.Dense(
375
+ config.intermediate_size,
376
+ kernel_initializer=get_initializer(config.initializer_range),
377
+ name="dense",
378
+ )
379
+ if isinstance(config.hidden_act, str):
380
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
381
+ else:
382
+ self.intermediate_act_fn = config.hidden_act
383
+ self.config = config
384
+
385
+ def call(self, hidden_states):
386
+ hidden_states = self.dense(hidden_states)
387
+ hidden_states = self.intermediate_act_fn(hidden_states)
388
+ return hidden_states
389
+
390
+ def build(self, input_shape=None):
391
+ if self.built:
392
+ return
393
+ self.built = True
394
+ if getattr(self, "dense", None) is not None:
395
+ with tf.name_scope(self.dense.name):
396
+ self.dense.build([None, None, self.config.hidden_size])
397
+
398
+
399
+ class TFLxmertOutput(keras.layers.Layer):
400
+ def __init__(self, config, **kwargs):
401
+ super().__init__(**kwargs)
402
+ self.dense = keras.layers.Dense(
403
+ config.hidden_size,
404
+ kernel_initializer=get_initializer(config.initializer_range),
405
+ name="dense",
406
+ )
407
+
408
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
409
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
410
+ self.config = config
411
+
412
+ def call(self, hidden_states, input_tensor, training=False):
413
+ hidden_states = self.dense(hidden_states)
414
+ hidden_states = self.dropout(hidden_states, training)
415
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
416
+ return hidden_states
417
+
418
+ def build(self, input_shape=None):
419
+ if self.built:
420
+ return
421
+ self.built = True
422
+ if getattr(self, "dense", None) is not None:
423
+ with tf.name_scope(self.dense.name):
424
+ self.dense.build([None, None, self.config.intermediate_size])
425
+ if getattr(self, "LayerNorm", None) is not None:
426
+ with tf.name_scope(self.LayerNorm.name):
427
+ self.LayerNorm.build([None, None, self.config.hidden_size])
428
+
429
+
430
+ class TFLxmertAttentionOutput(keras.layers.Layer):
431
+ def __init__(self, config, **kwargs):
432
+ super().__init__(**kwargs)
433
+ self.dense = keras.layers.Dense(
434
+ config.hidden_size,
435
+ kernel_initializer=get_initializer(config.initializer_range),
436
+ name="dense",
437
+ )
438
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
439
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
440
+ self.config = config
441
+
442
+ def call(self, hidden_states, input_tensor, training=False):
443
+ hidden_states = self.dense(hidden_states)
444
+ hidden_states = self.dropout(hidden_states, training=training)
445
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
446
+ return hidden_states
447
+
448
+ def build(self, input_shape=None):
449
+ if self.built:
450
+ return
451
+ self.built = True
452
+ if getattr(self, "dense", None) is not None:
453
+ with tf.name_scope(self.dense.name):
454
+ self.dense.build([None, None, self.config.hidden_size])
455
+ if getattr(self, "LayerNorm", None) is not None:
456
+ with tf.name_scope(self.LayerNorm.name):
457
+ self.LayerNorm.build([None, None, self.config.hidden_size])
458
+
459
+
460
+ class TFLxmertSelfAttentionLayer(keras.layers.Layer):
461
+ def __init__(self, config, **kwargs):
462
+ super().__init__(**kwargs)
463
+ self.self = TFLxmertAttention(config, name="self")
464
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
465
+
466
+ def call(self, input_tensor, attention_mask, output_attentions, training=False):
467
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
468
+ self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions)
469
+ if output_attentions:
470
+ attention_probs = self_output[1]
471
+ attention_output = self.attention_output(self_output[0], input_tensor)
472
+ return (attention_output, attention_probs) if output_attentions else (attention_output,)
473
+
474
+ def build(self, input_shape=None):
475
+ if self.built:
476
+ return
477
+ self.built = True
478
+ if getattr(self, "self", None) is not None:
479
+ with tf.name_scope(self.self.name):
480
+ self.self.build(None)
481
+ if getattr(self, "attention_output", None) is not None:
482
+ with tf.name_scope(self.attention_output.name):
483
+ self.attention_output.build(None)
484
+
485
+
486
+ class TFLxmertCrossAttentionLayer(keras.layers.Layer):
487
+ def __init__(self, config, **kwargs):
488
+ super().__init__(**kwargs)
489
+ self.att = TFLxmertAttention(config, name="att")
490
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
491
+
492
+ def call(
493
+ self,
494
+ input_tensor,
495
+ ctx_tensor,
496
+ ctx_att_mask,
497
+ output_attentions=False,
498
+ training=False,
499
+ ):
500
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training)
501
+ if output_attentions:
502
+ attention_probs = output[1]
503
+ attention_output = self.attention_output(output[0], input_tensor, training=training)
504
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
505
+ return outputs
506
+
507
+ def build(self, input_shape=None):
508
+ if self.built:
509
+ return
510
+ self.built = True
511
+ if getattr(self, "att", None) is not None:
512
+ with tf.name_scope(self.att.name):
513
+ self.att.build(None)
514
+ if getattr(self, "attention_output", None) is not None:
515
+ with tf.name_scope(self.attention_output.name):
516
+ self.attention_output.build(None)
517
+
518
+
519
+ class TFLxmertLayer(keras.layers.Layer):
520
+ def __init__(self, config, **kwargs):
521
+ super().__init__(**kwargs)
522
+ self.attention = TFLxmertSelfAttentionLayer(config, name="attention")
523
+ self.intermediate = TFLxmertIntermediate(config, name="intermediate")
524
+ self.transformer_output = TFLxmertOutput(config, name="output")
525
+
526
+ def call(self, hidden_states, attention_mask, output_attentions, training=False):
527
+ attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training)
528
+ attention_output = attention_outputs[0]
529
+ intermediate_output = self.intermediate(attention_output)
530
+ layer_output = self.transformer_output(intermediate_output, attention_output, training=training)
531
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
532
+ return outputs
533
+
534
+ def build(self, input_shape=None):
535
+ if self.built:
536
+ return
537
+ self.built = True
538
+ if getattr(self, "attention", None) is not None:
539
+ with tf.name_scope(self.attention.name):
540
+ self.attention.build(None)
541
+ if getattr(self, "intermediate", None) is not None:
542
+ with tf.name_scope(self.intermediate.name):
543
+ self.intermediate.build(None)
544
+ if getattr(self, "transformer_output", None) is not None:
545
+ with tf.name_scope(self.transformer_output.name):
546
+ self.transformer_output.build(None)
547
+
548
+
549
+ class TFLxmertXLayer(keras.layers.Layer):
550
+ def __init__(self, config, **kwargs):
551
+ super().__init__(**kwargs)
552
+ self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention")
553
+
554
+ # Self-attention Layers
555
+ self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att")
556
+ self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att")
557
+
558
+ # Intermediate and Output Layers (FFNs)
559
+ self.lang_inter = TFLxmertIntermediate(config, name="lang_inter")
560
+ self.lang_output = TFLxmertOutput(config, name="lang_output")
561
+ self.visn_inter = TFLxmertIntermediate(config, name="visn_inter")
562
+ self.visn_output = TFLxmertOutput(config, name="visn_output")
563
+
564
+ def cross_att(
565
+ self,
566
+ lang_input,
567
+ lang_attention_mask,
568
+ visn_input,
569
+ visn_attention_mask,
570
+ output_attentions,
571
+ training=False,
572
+ ):
573
+ # Cross Attention
574
+
575
+ # Keras model saving and loading *does not work* when two layers receive the same input tensors.
576
+ lang_attention_lang_input = tf.identity(lang_input)
577
+ visn_attention_lang_input = tf.identity(lang_input)
578
+ lang_attention_visn_input = tf.identity(visn_input)
579
+ visn_attention_visn_input = tf.identity(visn_input)
580
+
581
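+ # The same cross-attention layer (and therefore the same weights) is applied in both directions:
+ # language queries attend over the visual context, and visual queries attend over the language context.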
+ lang_att_output = self.visual_attention(
582
+ lang_attention_lang_input,
583
+ lang_attention_visn_input,
584
+ visn_attention_mask,
585
+ output_attentions=output_attentions,
586
+ training=training,
587
+ )
588
+ visn_att_output = self.visual_attention(
589
+ visn_attention_visn_input,
590
+ visn_attention_lang_input,
591
+ lang_attention_mask,
592
+ output_attentions=output_attentions,
593
+ training=training,
594
+ )
595
+ return lang_att_output, visn_att_output
596
+
597
+ def self_att(
598
+ self,
599
+ lang_input,
600
+ lang_attention_mask,
601
+ visn_input,
602
+ visn_attention_mask,
603
+ training=False,
604
+ ):
605
+ # Self Attention
606
+ output_attentions = False
607
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training)
608
+ visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training)
609
+ return lang_att_output[0], visn_att_output[0]
610
+
611
+ def output_fc(self, lang_input, visn_input, training=False):
612
+ # FC layers
613
+ lang_inter_output = self.lang_inter(lang_input)
614
+ visn_inter_output = self.visn_inter(visn_input)
615
+
616
+ # Layer output
617
+ lang_output = self.lang_output(lang_inter_output, lang_input, training)
618
+ visn_output = self.visn_output(visn_inter_output, visn_input, training)
619
+ return lang_output, visn_output
620
+
621
+ def call(
622
+ self,
623
+ lang_feats,
624
+ lang_attention_mask,
625
+ visn_feats,
626
+ visn_attention_mask,
627
+ output_attentions,
628
+ training=False,
629
+ ):
630
+ lang_att_output = lang_feats
631
+ visn_att_output = visn_feats
632
+
633
+ lang_att_output, visn_att_output = self.cross_att(
634
+ lang_att_output,
635
+ lang_attention_mask,
636
+ visn_att_output,
637
+ visn_attention_mask,
638
+ output_attentions,
639
+ training=training,
640
+ )
641
+ attention_probs = lang_att_output[1:]
642
+ lang_att_output, visn_att_output = self.self_att(
643
+ lang_att_output[0],
644
+ lang_attention_mask,
645
+ visn_att_output[0],
646
+ visn_attention_mask,
647
+ training=training,
648
+ )
649
+ lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training)
650
+
651
+ return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output)
652
+
653
+ def build(self, input_shape=None):
654
+ if self.built:
655
+ return
656
+ self.built = True
657
+ if getattr(self, "visual_attention", None) is not None:
658
+ with tf.name_scope(self.visual_attention.name):
659
+ self.visual_attention.build(None)
660
+ if getattr(self, "lang_self_att", None) is not None:
661
+ with tf.name_scope(self.lang_self_att.name):
662
+ self.lang_self_att.build(None)
663
+ if getattr(self, "visn_self_att", None) is not None:
664
+ with tf.name_scope(self.visn_self_att.name):
665
+ self.visn_self_att.build(None)
666
+ if getattr(self, "lang_inter", None) is not None:
667
+ with tf.name_scope(self.lang_inter.name):
668
+ self.lang_inter.build(None)
669
+ if getattr(self, "lang_output", None) is not None:
670
+ with tf.name_scope(self.lang_output.name):
671
+ self.lang_output.build(None)
672
+ if getattr(self, "visn_inter", None) is not None:
673
+ with tf.name_scope(self.visn_inter.name):
674
+ self.visn_inter.build(None)
675
+ if getattr(self, "visn_output", None) is not None:
676
+ with tf.name_scope(self.visn_output.name):
677
+ self.visn_output.build(None)
678
+
679
+
680
+ class TFLxmertEncoder(keras.layers.Layer):
681
+ def __init__(self, config, **kwargs):
682
+ super().__init__(**kwargs)
683
+
684
+ self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc")
685
+
686
+ # Number of layers
687
+ self.num_l_layers = config.l_layers
688
+ self.num_x_layers = config.x_layers
689
+ self.num_r_layers = config.r_layers
690
+
691
+ # Layers
692
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
693
+ self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)]
694
+ self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)]
695
+ self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)]
696
+ self.config = config
697
+
698
+ def call(
699
+ self,
700
+ lang_feats=None,
701
+ lang_attention_mask=None,
702
+ visual_feats=None,
703
+ visual_pos=None,
704
+ visual_attention_mask=None,
705
+ output_attentions=None,
706
+ training=False,
707
+ ):
708
+ vision_hidden_states = ()
709
+ language_hidden_states = ()
710
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
711
+ language_attentions = () if output_attentions or self.config.output_attentions else None
712
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
713
+
714
+ visual_feats = self.visn_fc([visual_feats, visual_pos], training=training)
715
+
716
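+ # The encoder runs three stacks in sequence: language-only layers, vision-only ("relational") layers,
+ # and finally cross-modality layers that exchange information between the two streams.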
+ # Run language layers
717
+ for layer_module in self.layer:
718
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training)
719
+ lang_feats = l_outputs[0]
720
+ language_hidden_states = language_hidden_states + (lang_feats,)
721
+ if language_attentions is not None:
722
+ language_attentions = language_attentions + (l_outputs[1],)
723
+
724
+ # Run relational layers
725
+ for layer_module in self.r_layers:
726
+ v_outputs = layer_module(
727
+ visual_feats,
728
+ visual_attention_mask,
729
+ output_attentions,
730
+ training=training,
731
+ )
732
+ visual_feats = v_outputs[0]
733
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
734
+ if vision_attentions is not None:
735
+ vision_attentions = vision_attentions + (v_outputs[1],)
736
+
737
+ # Run cross-modality layers
738
+ for layer_module in self.x_layers:
739
+ x_outputs = layer_module(
740
+ lang_feats,
741
+ lang_attention_mask,
742
+ visual_feats,
743
+ visual_attention_mask,
744
+ output_attentions,
745
+ training=training,
746
+ )
747
+ lang_feats, visual_feats = x_outputs[:2]
748
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
749
+ language_hidden_states = language_hidden_states + (lang_feats,)
750
+ if cross_encoder_attentions is not None:
751
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
752
+
753
+ visual_encoder_outputs = (
754
+ vision_hidden_states,
755
+ vision_attentions if output_attentions else None,
756
+ )
757
+ lang_encoder_outputs = (
758
+ language_hidden_states,
759
+ language_attentions if output_attentions else None,
760
+ )
761
+
762
+ return (
763
+ visual_encoder_outputs,
764
+ lang_encoder_outputs,
765
+ cross_encoder_attentions if output_attentions else None,
766
+ )
767
+
768
+ def build(self, input_shape=None):
769
+ if self.built:
770
+ return
771
+ self.built = True
772
+ if getattr(self, "visn_fc", None) is not None:
773
+ with tf.name_scope(self.visn_fc.name):
774
+ self.visn_fc.build(None)
775
+ if getattr(self, "layer", None) is not None:
776
+ for layer in self.layer:
777
+ with tf.name_scope(layer.name):
778
+ layer.build(None)
779
+ if getattr(self, "x_layers", None) is not None:
780
+ for layer in self.x_layers:
781
+ with tf.name_scope(layer.name):
782
+ layer.build(None)
783
+ if getattr(self, "r_layers", None) is not None:
784
+ for layer in self.r_layers:
785
+ with tf.name_scope(layer.name):
786
+ layer.build(None)
787
+
788
+
789
+ @keras_serializable
790
+ class TFLxmertMainLayer(keras.layers.Layer):
791
+ config_class = LxmertConfig
792
+
793
+ def __init__(self, config, **kwargs):
794
+ super().__init__(**kwargs)
795
+
796
+ self.config = config
797
+ self.num_l_layers = config.l_layers
798
+ self.num_x_layers = config.x_layers
799
+ self.num_r_layers = config.r_layers
800
+ self.initializer_range = config.initializer_range
801
+ self.output_attentions = config.output_attentions
802
+ self.output_hidden_states = config.output_hidden_states
803
+ self.return_dict = config.use_return_dict
804
+ self.embeddings = TFLxmertEmbeddings(config, name="embeddings")
805
+ self.encoder = TFLxmertEncoder(config, name="encoder")
806
+ self.pooler = TFLxmertPooler(config, name="pooler")
807
+ self.config = config
808
+
809
+ def get_input_embeddings(self):
810
+ return self.embeddings
811
+
812
+ def set_input_embeddings(self, value):
813
+ self.embeddings.weight = value
814
+ self.embeddings.vocab_size = shape_list(value)[0]
815
+
816
+ def _prune_heads(self, heads_to_prune):
817
+ raise NotImplementedError
818
+
819
+ @unpack_inputs
820
+ def call(
821
+ self,
822
+ input_ids=None,
823
+ visual_feats=None,
824
+ visual_pos=None,
825
+ attention_mask=None,
826
+ visual_attention_mask=None,
827
+ token_type_ids=None,
828
+ inputs_embeds=None,
829
+ output_attentions=None,
830
+ output_hidden_states=None,
831
+ return_dict=None,
832
+ training=False,
833
+ ):
834
+ if input_ids is not None and inputs_embeds is not None:
835
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
836
+ elif input_ids is not None:
837
+ input_shape = shape_list(input_ids)
838
+ elif inputs_embeds is not None:
839
+ input_shape = shape_list(inputs_embeds)[:-1]
840
+ else:
841
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
842
+ if visual_pos is None or visual_feats is None:
843
+ raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.")
844
+
845
+ if attention_mask is None:
846
+ attention_mask = tf.fill(input_shape, 1)
847
+
848
+ if token_type_ids is None:
849
+ token_type_ids = tf.fill(input_shape, 0)
850
+
851
+ # Positional Word Embeddings
852
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training)
853
+
854
+ # We create a 3D attention mask from a 2D tensor mask.
855
+ # Sizes are [batch_size, 1, 1, to_seq_length]
856
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
857
+ # This attention mask is simpler than the triangular masking of causal attention
858
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
859
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
860
+
861
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
862
+ # masked positions, this operation will create a tensor which is 0.0 for
863
+ # positions we want to attend and -10000.0 for masked positions.
864
+ # Since we are adding it to the raw scores before the softmax, this is
865
+ # effectively the same as removing these entirely.
866
+
867
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
868
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
869
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
870
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
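+ # For example, a padding mask row of [1, 1, 0] becomes [0.0, 0.0, -10000.0] here, so padded
+ # positions receive a probability of ~0 after the softmax while real tokens are left untouched.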
871
+
872
+ if visual_attention_mask is not None:
873
+ extended_visual_attention_mask = tf.reshape(visual_attention_mask, (input_shape[0], 1, 1, input_shape[1]))
874
+ extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1)
875
+
876
+ extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype)
877
+ extended_visual_attention_mask = tf.multiply(
878
+ tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst
879
+ )
880
+ else:
881
+ extended_visual_attention_mask = None
882
+
883
+ # Run Lxmert encoder
884
+ encoder_outputs = self.encoder(
885
+ embedding_output,
886
+ extended_attention_mask,
887
+ visual_feats,
888
+ visual_pos,
889
+ extended_visual_attention_mask,
890
+ output_attentions,
891
+ training,
892
+ )
893
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
894
+ vision_hidden_states = visual_encoder_outputs[0]
895
+ language_hidden_states = lang_encoder_outputs[0]
896
+
897
+ all_attentions = ()
898
+ if output_attentions:
899
+ language_attentions = lang_encoder_outputs[1]
900
+ vision_attentions = visual_encoder_outputs[1]
901
+ cross_encoder_attentions = encoder_outputs[2]
902
+ all_attentions = (
903
+ language_attentions,
904
+ vision_attentions,
905
+ cross_encoder_attentions,
906
+ )
907
+
908
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
909
+
910
+ visual_output = vision_hidden_states[-1]
911
+ lang_output = language_hidden_states[-1]
912
+ pooled_output = self.pooler(lang_output)
913
+
914
+ if not return_dict:
915
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
916
+
917
+ return TFLxmertModelOutput(
918
+ pooled_output=pooled_output,
919
+ language_output=lang_output,
920
+ vision_output=visual_output,
921
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
922
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
923
+ language_attentions=language_attentions if output_attentions else None,
924
+ vision_attentions=vision_attentions if output_attentions else None,
925
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
926
+ )
927
+
928
+ def build(self, input_shape=None):
929
+ if self.built:
930
+ return
931
+ self.built = True
932
+ if getattr(self, "embeddings", None) is not None:
933
+ with tf.name_scope(self.embeddings.name):
934
+ self.embeddings.build(None)
935
+ if getattr(self, "encoder", None) is not None:
936
+ with tf.name_scope(self.encoder.name):
937
+ self.encoder.build(None)
938
+ if getattr(self, "pooler", None) is not None:
939
+ with tf.name_scope(self.pooler.name):
940
+ self.pooler.build(None)
941
+
942
+
943
+ class TFLxmertPreTrainedModel(TFPreTrainedModel):
944
+ """
945
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
946
+ models.
947
+ """
948
+
949
+ config_class = LxmertConfig
950
+ base_model_prefix = "lxmert"
951
+
952
+ @property
953
+ def dummy_inputs(self):
954
+ """
955
+ Dummy inputs to build the network.
956
+
957
+ Returns:
958
+ Dict of tf.Tensors with dummy inputs
959
+ """
960
+ batch_size = 2
961
+ num_visual_features = 10
962
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
963
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
964
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
965
+
966
+ return {
967
+ "input_ids": input_ids,
968
+ "visual_feats": visual_feats,
969
+ "visual_pos": visual_pos,
970
+ }
971
+
972
+ @property
973
+ def input_signature(self):
974
+ return {
975
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
976
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
977
+ "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"),
978
+ "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"),
979
+ "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"),
980
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
981
+ }
982
+
983
+
984
+ LXMERT_START_DOCSTRING = r"""
985
+
986
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
987
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
988
+ model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
990
+ Genome, using a combination of masked language modeling, region-of-interest feature regression, cross-entropy loss
991
+ for question answering attribute prediction, and object tag prediction.
991
+
992
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
993
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
994
+ behavior.
995
+
996
+ <Tip>
997
+
998
+ TensorFlow models and layers in `transformers` accept two formats as input:
999
+
1000
+ - having all inputs as keyword arguments (like PyTorch models), or
1001
+ - having all inputs as a list, tuple or dict in the first positional argument.
1002
+
1003
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1004
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1005
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1006
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1007
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1008
+ positional argument:
1009
+
1010
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1011
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1012
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1013
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1014
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1015
+
1016
+ Note that when creating models and layers with
1017
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1018
+ about any of this, as you can just pass inputs like you would to any other Python function!
1019
+
1020
+ </Tip>
1021
+
1022
+ Parameters:
1023
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
1024
+ Initializing with a config file does not load the weights associated with the model, only the
1025
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1026
+ """
1027
+
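+ # A minimal usage sketch of the dictionary input format described in the tip above. The visual
+ # features and boxes are assumed to come from an external Faster R-CNN feature extractor (they are
+ # not produced by this library):
+ #
+ #     model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #     outputs = model({"input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos})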
1028
+ LXMERT_INPUTS_DOCSTRING = r"""
1029
+ Args:
1030
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
1031
+ Indices of input sequence tokens in the vocabulary.
1032
+
1033
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1034
+ [`PreTrainedTokenizer.encode`] for details.
1035
+
1036
+ [What are input IDs?](../glossary#input-ids)
1037
+ visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
1038
+ This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
1040
+ Faster R-CNN model.
1040
+
1041
+ These are currently not provided by the transformers library.
1042
+ visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
1043
+ This input represents spatial features corresponding to their relative (via index) visual features. The
1044
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1045
+ 1.
1046
+
1047
+ These are currently not provided by the transformers library.
1048
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1049
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1050
+
1051
+ - 1 for tokens that are **not masked**,
1052
+ - 0 for tokens that are **masked**.
1053
+
1054
+ [What are attention masks?](../glossary#attention-mask)
1055
+ visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1056
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1057
+
1058
+ - 1 for tokens that are **not masked**,
1059
+ - 0 for tokens that are **masked**.
1060
+
1061
+ [What are attention masks?](../glossary#attention-mask)
1062
+ token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1063
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1064
+ 1]`:
1065
+
1066
+ - 0 corresponds to a *sentence A* token,
1067
+ - 1 corresponds to a *sentence B* token.
1068
+
1069
+ [What are token type IDs?](../glossary#token-type-ids)
1070
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1071
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1072
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1073
+ model's internal embedding lookup matrix.
1074
+ output_attentions (`bool`, *optional*):
1075
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1076
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1077
+ config will be used instead.
1078
+ output_hidden_states (`bool`, *optional*):
1079
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1080
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1081
+ used instead.
1082
+ return_dict (`bool`, *optional*):
1083
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1084
+ eager mode, in graph mode the value will always be set to True.
1085
+ training (`bool`, *optional*, defaults to `False`):
1086
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1087
+ behaviors between training and evaluation).
1088
+ """
1089
+
1090
+
1091
+ @add_start_docstrings(
1092
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
1093
+ LXMERT_START_DOCSTRING,
1094
+ )
1095
+ class TFLxmertModel(TFLxmertPreTrainedModel):
1096
+ def __init__(self, config, *inputs, **kwargs):
1097
+ super().__init__(config, *inputs, **kwargs)
1098
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1099
+
1100
+ @unpack_inputs
1101
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1102
+ @add_code_sample_docstrings(
1103
+ checkpoint=_CHECKPOINT_FOR_DOC,
1104
+ output_type=TFLxmertModelOutput,
1105
+ config_class=_CONFIG_FOR_DOC,
1106
+ )
1107
+ def call(
1108
+ self,
1109
+ input_ids: TFModelInputType | None = None,
1110
+ visual_feats: tf.Tensor | None = None,
1111
+ visual_pos: tf.Tensor | None = None,
1112
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1113
+ visual_attention_mask: np.ndarray | tf.Tensor | None = None,
1114
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1115
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1116
+ output_attentions: Optional[bool] = None,
1117
+ output_hidden_states: Optional[bool] = None,
1118
+ return_dict: Optional[bool] = None,
1119
+ training: bool = False,
1120
+ ) -> Union[Tuple, TFLxmertModelOutput]:
1121
+ outputs = self.lxmert(
1122
+ input_ids,
1123
+ visual_feats,
1124
+ visual_pos,
1125
+ attention_mask,
1126
+ visual_attention_mask,
1127
+ token_type_ids,
1128
+ inputs_embeds,
1129
+ output_attentions,
1130
+ output_hidden_states,
1131
+ return_dict,
1132
+ training,
1133
+ )
1134
+
1135
+ return outputs
1136
+
1137
+ def build(self, input_shape=None):
1138
+ if self.built:
1139
+ return
1140
+ self.built = True
1141
+ if getattr(self, "lxmert", None) is not None:
1142
+ with tf.name_scope(self.lxmert.name):
1143
+ self.lxmert.build(None)
1144
+
1145
+
1146
+ class TFLxmertPooler(keras.layers.Layer):
1147
+ def __init__(self, config, **kwargs):
1148
+ super().__init__(**kwargs)
1149
+ self.dense = keras.layers.Dense(
1150
+ config.hidden_size,
1151
+ kernel_initializer=get_initializer(config.initializer_range),
1152
+ activation="tanh",
1153
+ name="dense",
1154
+ )
1155
+ self.config = config
1156
+
1157
+ def call(self, hidden_states):
1158
+ # We "pool" the model by simply taking the hidden state corresponding
1159
+ # to the first token.
1160
+ first_token_tensor = hidden_states[:, 0]
1161
+ pooled_output = self.dense(first_token_tensor)
1162
+ return pooled_output
1163
+
1164
+ def build(self, input_shape=None):
1165
+ if self.built:
1166
+ return
1167
+ self.built = True
1168
+ if getattr(self, "dense", None) is not None:
1169
+ with tf.name_scope(self.dense.name):
1170
+ self.dense.build([None, None, self.config.hidden_size])
1171
+
1172
+
1173
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
1174
+ class TFLxmertPredictionHeadTransform(keras.layers.Layer):
1175
+ def __init__(self, config: LxmertConfig, **kwargs):
1176
+ super().__init__(**kwargs)
1177
+
1178
+ self.dense = keras.layers.Dense(
1179
+ units=config.hidden_size,
1180
+ kernel_initializer=get_initializer(config.initializer_range),
1181
+ name="dense",
1182
+ )
1183
+
1184
+ if isinstance(config.hidden_act, str):
1185
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
1186
+ else:
1187
+ self.transform_act_fn = config.hidden_act
1188
+
1189
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
1190
+ self.config = config
1191
+
1192
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1193
+ hidden_states = self.dense(inputs=hidden_states)
1194
+ hidden_states = self.transform_act_fn(hidden_states)
1195
+ hidden_states = self.LayerNorm(inputs=hidden_states)
1196
+
1197
+ return hidden_states
1198
+
1199
+ def build(self, input_shape=None):
1200
+ if self.built:
1201
+ return
1202
+ self.built = True
1203
+ if getattr(self, "dense", None) is not None:
1204
+ with tf.name_scope(self.dense.name):
1205
+ self.dense.build([None, None, self.config.hidden_size])
1206
+ if getattr(self, "LayerNorm", None) is not None:
1207
+ with tf.name_scope(self.LayerNorm.name):
1208
+ self.LayerNorm.build([None, None, self.config.hidden_size])
1209
+
1210
+
1211
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
1212
+ class TFLxmertLMPredictionHead(keras.layers.Layer):
1213
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1214
+ super().__init__(**kwargs)
1215
+
1216
+ self.config = config
1217
+ self.hidden_size = config.hidden_size
1218
+
1219
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1220
+
1221
+ # The output weights are the same as the input embeddings, but there is
1222
+ # an output-only bias for each token.
1223
+ self.input_embeddings = input_embeddings
1224
+
1225
+ def build(self, input_shape=None):
1226
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1227
+
1228
+ if self.built:
1229
+ return
1230
+ self.built = True
1231
+ if getattr(self, "transform", None) is not None:
1232
+ with tf.name_scope(self.transform.name):
1233
+ self.transform.build(None)
1234
+
1235
+ def get_output_embeddings(self) -> keras.layers.Layer:
1236
+ return self.input_embeddings
1237
+
1238
+ def set_output_embeddings(self, value: tf.Variable):
1239
+ self.input_embeddings.weight = value
1240
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1241
+
1242
+ def get_bias(self) -> Dict[str, tf.Variable]:
1243
+ return {"bias": self.bias}
1244
+
1245
+ def set_bias(self, value: tf.Variable):
1246
+ self.bias = value["bias"]
1247
+ self.config.vocab_size = shape_list(value["bias"])[0]
1248
+
1249
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1250
+ hidden_states = self.transform(hidden_states=hidden_states)
1251
+ seq_length = shape_list(hidden_states)[1]
1252
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1253
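+ # Project back to vocabulary logits by reusing the (tied) input embedding matrix as the output projection.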
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1254
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1255
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1256
+
1257
+ return hidden_states
1258
+
1259
+
1260
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
1261
+ class TFLxmertMLMHead(keras.layers.Layer):
1262
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1263
+ super().__init__(**kwargs)
1264
+
1265
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1266
+
1267
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1268
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1269
+
1270
+ return prediction_scores
1271
+
1272
+ def build(self, input_shape=None):
1273
+ if self.built:
1274
+ return
1275
+ self.built = True
1276
+ if getattr(self, "predictions", None) is not None:
1277
+ with tf.name_scope(self.predictions.name):
1278
+ self.predictions.build(None)
1279
+
1280
+
1281
+ class TFLxmertPreTrainingHeads(keras.layers.Layer):
1282
+ def __init__(self, config, input_embeddings, **kwargs):
1283
+ super().__init__(**kwargs)
1284
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1285
+
1286
+ self.seq_relationship = keras.layers.Dense(
1287
+ 2,
1288
+ kernel_initializer=get_initializer(config.initializer_range),
1289
+ name="seq_relationship",
1290
+ )
1291
+ self.config = config
1292
+
1293
+ def call(self, sequence_output, pooled_output):
1294
+ prediction_scores = self.predictions(sequence_output)
1295
+ seq_relationship_score = self.seq_relationship(pooled_output)
1296
+ return prediction_scores, seq_relationship_score
1297
+
1298
+ def build(self, input_shape=None):
1299
+ if self.built:
1300
+ return
1301
+ self.built = True
1302
+ if getattr(self, "predictions", None) is not None:
1303
+ with tf.name_scope(self.predictions.name):
1304
+ self.predictions.build(None)
1305
+ if getattr(self, "seq_relationship", None) is not None:
1306
+ with tf.name_scope(self.seq_relationship.name):
1307
+ self.seq_relationship.build([None, None, self.config.hidden_size])
1308
+
1309
+
1310
+ class TFLxmertVisualAnswerHead(keras.layers.Layer):
1311
+ def __init__(self, config, num_labels, **kwargs):
1312
+ super().__init__(**kwargs)
1313
+ hid_dim = config.hidden_size
1314
+ self.dense = keras.layers.Dense(
1315
+ hid_dim * 2,
1316
+ kernel_initializer=get_initializer(config.initializer_range),
1317
+ name="logit_fc_._0",
1318
+ )
1319
+ self.activation = get_tf_activation("gelu")
1320
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2")
1321
+ self.dense_1 = keras.layers.Dense(
1322
+ num_labels,
1323
+ kernel_initializer=get_initializer(config.initializer_range),
1324
+ name="logit_fc_._3",
1325
+ )
1326
+ self.hid_dim = hid_dim
1327
+
1328
+ def call(self, hidden_states):
1329
+ hidden_states = self.dense(hidden_states)
1330
+ hidden_states = self.activation(hidden_states)
1331
+ hidden_states = self.layer_norm(hidden_states)
1332
+ hidden_states = self.dense_1(hidden_states)
1333
+
1334
+ return hidden_states
1335
+
1336
+ def build(self, input_shape=None):
1337
+ if self.built:
1338
+ return
1339
+ self.built = True
1340
+ if getattr(self, "dense", None) is not None:
1341
+ with tf.name_scope(self.dense.name):
1342
+ self.dense.build([None, None, self.hid_dim])
1343
+ if getattr(self, "layer_norm", None) is not None:
1344
+ with tf.name_scope(self.layer_norm.name):
1345
+ self.layer_norm.build([None, self.hid_dim * 2])
1346
+ if getattr(self, "dense_1", None) is not None:
1347
+ with tf.name_scope(self.dense_1.name):
1348
+ self.dense_1.build([None, None, self.hid_dim * 2])
1349
+
1350
+
1351
+ class TFLxmertVisualObjHead(keras.layers.Layer):
1352
+ def __init__(self, config, **kwargs):
1353
+ super().__init__(**kwargs)
1354
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1355
+
1356
+ # Decide the use of visual losses
1357
+ visual_losses = {}
1358
+ if config.visual_obj_loss:
1359
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
1360
+ if config.visual_attr_loss:
1361
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
1362
+ if config.visual_feat_loss:
1363
+ visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim}
1364
+ self.visual_losses = visual_losses
1365
+
1366
+ # One decoder (a Dense projection) per visual loss, mapping the transformed hidden states to that
1367
+ # loss's output dimension.
1368
+ self.decoder_dict = {
1369
+ key: keras.layers.Dense(
1370
+ self.visual_losses[key]["num"],
1371
+ kernel_initializer=get_initializer(config.initializer_range),
1372
+ name=f"decoder_dict.{key}",
1373
+ )
1374
+ for key in self.visual_losses
1375
+ }
1376
+ self.config = config
1377
+
1378
+ def call(self, hidden_states):
1379
+ hidden_states = self.transform(hidden_states)
1380
+ output = {}
1381
+ for key in self.visual_losses:
1382
+ output[key] = self.decoder_dict[key](hidden_states)
1383
+ return output
1384
+
1385
+ def build(self, input_shape=None):
1386
+ if self.built:
1387
+ return
1388
+ self.built = True
1389
+ if getattr(self, "transform", None) is not None:
1390
+ with tf.name_scope(self.transform.name):
1391
+ self.transform.build(None)
1392
+ if getattr(self, "decoder_dict", None) is not None:
1393
+ for layer in self.decoder_dict.values():
1394
+ with tf.name_scope(layer.name):
1395
+ layer.build([None, None, self.config.hidden_size])
1396
+
1397
+
1398
+ @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING)
1399
+ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
1400
+ def __init__(self, config, *inputs, **kwargs):
1401
+ super().__init__(config, *inputs, **kwargs)
1402
+
1403
+ self.config = config
1404
+ self.num_qa_labels = config.num_qa_labels
1405
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1406
+
1407
+ # Use of pretraining tasks
1408
+ self.task_mask_lm = config.task_mask_lm
1409
+ self.task_obj_predict = config.task_obj_predict
1410
+ self.task_matched = config.task_matched
1411
+ self.task_qa = config.task_qa
1412
+
1413
+ # Lxmert backbone
1414
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1415
+
1416
+ # Pre-training heads
1417
+ self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls")
1418
+ if self.task_obj_predict:
1419
+ self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head")
1420
+ if self.task_qa:
1421
+ self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head")
1422
+
1423
+ # Loss functions
1424
+ self.loss_fcts = {
1425
+ "l2": keras.losses.Huber(delta=1.0, name="huber_loss"),
1426
+ "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1427
+ "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1428
+ }
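+ # "l2" (a Huber loss with delta=1.0) is used for visual feature regression, while "visn_ce" and "ce"
+ # are sparse categorical cross-entropy losses for the object/attribute prediction, cross-modality
+ # matching, masked-LM and QA objectives.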
1429
+
1430
+ visual_losses = {}
1431
+ if config.visual_obj_loss:
1432
+ visual_losses["obj"] = {
1433
+ "shape": (-1,),
1434
+ "num": config.num_object_labels,
1435
+ "loss": "visn_ce",
1436
+ }
1437
+ if config.visual_attr_loss:
1438
+ visual_losses["attr"] = {
1439
+ "shape": (-1,),
1440
+ "num": config.num_attr_labels,
1441
+ "loss": "visn_ce",
1442
+ }
1443
+ if config.visual_feat_loss:
1444
+ visual_losses["feat"] = {
1445
+ "shape": (-1, config.visual_feat_dim),
1446
+ "num": config.visual_feat_dim,
1447
+ "loss": "l2",
1448
+ }
1449
+ self.visual_losses = visual_losses
1450
+
1451
+ @property
1452
+ def dummy_inputs(self):
1453
+ """
1454
+ Dummy inputs to build the network.
1455
+
1456
+ Returns:
1457
+ Dict of tf.Tensors with dummy inputs
1458
+ """
1459
+ batch_size = 2
1460
+ num_visual_features = 10
1461
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
1462
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
1463
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
1464
+
1465
+ if self.config.task_obj_predict:
1466
+ obj_labels = {}
1467
+ if self.config.visual_attr_loss and self.config.task_obj_predict:
1468
+ obj_labels["attr"] = (
1469
+ tf.ones([batch_size, num_visual_features]),
1470
+ tf.ones([batch_size, num_visual_features]),
1471
+ )
1472
+ if self.config.visual_feat_loss and self.config.task_obj_predict:
1473
+ obj_labels["feat"] = (
1474
+ tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]),
1475
+ tf.ones([batch_size, num_visual_features]),
1476
+ )
1477
+ if self.config.visual_obj_loss and self.config.task_obj_predict:
1478
+ obj_labels["obj"] = (
1479
+ tf.ones([batch_size, num_visual_features]),
1480
+ tf.ones([batch_size, num_visual_features]),
1481
+ )
1482
+
1483
+ return {
1484
+ **{
1485
+ "input_ids": input_ids,
1486
+ "visual_feats": visual_feats,
1487
+ "visual_pos": visual_pos,
1488
+ },
1489
+ **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}),
1490
+ }
1491
+
1492
+ def get_lm_head(self):
1493
+ return self.cls.predictions
1494
+
1495
+ def get_prefix_bias_name(self):
1496
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1497
+ return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name
1498
+
1499
+ @unpack_inputs
1500
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1501
+ @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1502
+ def call(
1503
+ self,
1504
+ input_ids: TFModelInputType | None = None,
1505
+ visual_feats: tf.Tensor | None = None,
1506
+ visual_pos: tf.Tensor | None = None,
1507
+ attention_mask: tf.Tensor | None = None,
1508
+ visual_attention_mask: tf.Tensor | None = None,
1509
+ token_type_ids: tf.Tensor | None = None,
1510
+ inputs_embeds: tf.Tensor | None = None,
1511
+ masked_lm_labels: tf.Tensor | None = None,
1512
+ obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None,
1513
+ matched_label: tf.Tensor | None = None,
1514
+ ans: tf.Tensor | None = None,
1515
+ output_attentions: bool | None = None,
1516
+ output_hidden_states: bool | None = None,
1517
+ return_dict: bool | None = None,
1518
+ training: bool = False,
1519
+ ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput:
1520
+ r"""
1521
+ masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1522
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1523
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1524
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1525
+ obj_labels (`Dict[Str: Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`):
1526
+ each key is named after one of the visual losses, and the two elements of the tuple are of shape
1527
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label id and
1528
+ the label score respectively
1529
+ matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1530
+ Labels for computing whether or not the text input matches the image (classification) loss. Input
1531
+ should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
1532
+
1533
+ - 0 indicates that the sentence does not match the image,
1534
+ - 1 indicates that the sentence does match the image.
1535
+ ans (`tf.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`):
1536
+ a one-hot representation of the correct answer
1537
+
1538
+ Returns:
1539
+ """
1540
+
1541
+ lxmert_output = self.lxmert(
1542
+ input_ids,
1543
+ visual_feats,
1544
+ visual_pos,
1545
+ attention_mask,
1546
+ visual_attention_mask,
1547
+ token_type_ids,
1548
+ inputs_embeds,
1549
+ output_attentions,
1550
+ output_hidden_states,
1551
+ return_dict,
1552
+ training,
1553
+ )
1554
+
1555
+ lang_output, visual_output, pooled_output = (
1556
+ lxmert_output[0],
1557
+ lxmert_output[1],
1558
+ lxmert_output[2],
1559
+ )
1560
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1561
+ if self.task_qa:
1562
+ answer_score = self.answer_head(pooled_output)
1563
+ else:
1564
+ answer_score = pooled_output[0][0]
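+ # When the QA head is disabled, a dummy scalar (taken from the pooled output) stands in for the
+ # answer logits so the output structure stays the same.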
1565
+
1566
+ total_loss = (
1567
+ None
1568
+ if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)
1569
+ else tf.constant(0.0)
1570
+ )
1571
+ losses = ()
1572
+ if masked_lm_labels is not None and self.task_mask_lm:
1573
+ masked_lm_loss = self.loss_fcts["ce"](
1574
+ tf.reshape(masked_lm_labels, [-1]),
1575
+ tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]),
1576
+ )
1577
+ total_loss += masked_lm_loss
1578
+ losses += (masked_lm_loss,)
1579
+ if matched_label is not None and self.task_matched:
1580
+ matched_loss = self.loss_fcts["ce"](
1581
+ tf.reshape(matched_label, [-1]),
1582
+ tf.reshape(cross_relationship_score, [-1, 2]),
1583
+ )
1584
+ total_loss += matched_loss
1585
+ losses += (matched_loss,)
1586
+ if obj_labels is not None and self.task_obj_predict:
1587
+ total_visn_loss = 0.0
1588
+ visn_prediction_scores_dict = self.obj_predict_head(visual_output)
1589
+ for key, key_info in self.visual_losses.items():
1590
+ label, mask_conf = obj_labels[key]
1591
+ output_dim = key_info["num"]
1592
+ loss_fct_name = key_info["loss"]
1593
+ label_shape = key_info["shape"]
1594
+ weight = self.visual_loss_normalizer
1595
+ visn_loss_fct = self.loss_fcts[loss_fct_name]
1596
+ visn_prediction_scores = visn_prediction_scores_dict[key]
1597
+ visn_loss = visn_loss_fct(
1598
+ tf.reshape(label, label_shape),
1599
+ tf.reshape(visn_prediction_scores, [-1, output_dim]),
1600
+ )
1601
+
1602
+ if visn_loss.ndim > 1: # Regression Losses
1603
+ visn_loss = tf.reduce_mean(visn_loss)
1604
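+ # mask_conf down-weights (or zeroes out) positions without a reliable ground-truth label so they do
+ # not contribute to the visual loss; the result is then rescaled by the configured visual_loss_normalizer.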
+ visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight
1605
+ total_visn_loss += visn_loss
1606
+ losses += (visn_loss,)
1607
+ total_loss += total_visn_loss
1608
+ if ans is not None and self.task_qa:
1609
+ answer_loss = self.loss_fcts["ce"](
1610
+ tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels])
1611
+ )
1612
+ # exclude "*2" here to match the effect of QA losses.
1613
+ # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
1614
+ # Now : (loss *1) for 12 epochs
1615
+ #
1616
+ # * 2 # Multiply by 2 because > half of the data will not have label
1617
+ total_loss += answer_loss
1618
+ losses += (answer_loss,)
1619
+ # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach()
1620
+
1621
+ if not return_dict:
1622
+ output = (
1623
+ lang_prediction_scores,
1624
+ cross_relationship_score,
1625
+ answer_score,
1626
+ ) + lxmert_output[3:]
1627
+ return ((total_loss,) + output) if total_loss is not None else output
1628
+
1629
+ return TFLxmertForPreTrainingOutput(
1630
+ loss=total_loss,
1631
+ prediction_logits=lang_prediction_scores,
1632
+ cross_relationship_score=cross_relationship_score,
1633
+ question_answering_score=answer_score,
1634
+ language_hidden_states=lxmert_output.language_hidden_states,
1635
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1636
+ language_attentions=lxmert_output.language_attentions,
1637
+ vision_attentions=lxmert_output.vision_attentions,
1638
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1639
+ )
1640
+
1641
+ def build(self, input_shape=None):
1642
+ if self.built:
1643
+ return
1644
+ self.built = True
1645
+ if getattr(self, "lxmert", None) is not None:
1646
+ with tf.name_scope(self.lxmert.name):
1647
+ self.lxmert.build(None)
1648
+ if getattr(self, "cls", None) is not None:
1649
+ with tf.name_scope(self.cls.name):
1650
+ self.cls.build(None)
1651
+ if getattr(self, "obj_predict_head", None) is not None:
1652
+ with tf.name_scope(self.obj_predict_head.name):
1653
+ self.obj_predict_head.build(None)
1654
+ if getattr(self, "answer_head", None) is not None:
1655
+ with tf.name_scope(self.answer_head.name):
1656
+ self.answer_head.build(None)
venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py ADDED
@@ -0,0 +1,503 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+
30
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
31
+ def load_vocab(vocab_file):
32
+ """Loads a vocabulary file into a dictionary."""
33
+ vocab = collections.OrderedDict()
34
+ with open(vocab_file, "r", encoding="utf-8") as reader:
35
+ tokens = reader.readlines()
36
+ for index, token in enumerate(tokens):
37
+ token = token.rstrip("\n")
38
+ vocab[token] = index
39
+ return vocab
40
+
41
+
42
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, BertTokenizer->LxmertTokenizer
53
+ class LxmertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct a Lxmert tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original Lxmert).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_file,
100
+ do_lower_case=True,
101
+ do_basic_tokenize=True,
102
+ never_split=None,
103
+ unk_token="[UNK]",
104
+ sep_token="[SEP]",
105
+ pad_token="[PAD]",
106
+ cls_token="[CLS]",
107
+ mask_token="[MASK]",
108
+ tokenize_chinese_chars=True,
109
+ strip_accents=None,
110
+ **kwargs,
111
+ ):
112
+ if not os.path.isfile(vocab_file):
113
+ raise ValueError(
114
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
115
+ " model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
116
+ )
117
+ self.vocab = load_vocab(vocab_file)
118
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
119
+ self.do_basic_tokenize = do_basic_tokenize
120
+ if do_basic_tokenize:
121
+ self.basic_tokenizer = BasicTokenizer(
122
+ do_lower_case=do_lower_case,
123
+ never_split=never_split,
124
+ tokenize_chinese_chars=tokenize_chinese_chars,
125
+ strip_accents=strip_accents,
126
+ )
127
+
128
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
129
+
130
+ super().__init__(
131
+ do_lower_case=do_lower_case,
132
+ do_basic_tokenize=do_basic_tokenize,
133
+ never_split=never_split,
134
+ unk_token=unk_token,
135
+ sep_token=sep_token,
136
+ pad_token=pad_token,
137
+ cls_token=cls_token,
138
+ mask_token=mask_token,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ **kwargs,
142
+ )
143
+
144
+ @property
145
+ def do_lower_case(self):
146
+ return self.basic_tokenizer.do_lower_case
147
+
148
+ @property
149
+ def vocab_size(self):
150
+ return len(self.vocab)
151
+
152
+ def get_vocab(self):
153
+ return dict(self.vocab, **self.added_tokens_encoder)
154
+
155
+ def _tokenize(self, text, split_special_tokens=False):
156
+ split_tokens = []
157
+ if self.do_basic_tokenize:
158
+ for token in self.basic_tokenizer.tokenize(
159
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
160
+ ):
161
+ # If the token is part of the never_split set
162
+ if token in self.basic_tokenizer.never_split:
163
+ split_tokens.append(token)
164
+ else:
165
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
166
+ else:
167
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
168
+ return split_tokens
169
+
170
+ def _convert_token_to_id(self, token):
171
+ """Converts a token (str) in an id using the vocab."""
172
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
173
+
174
+ def _convert_id_to_token(self, index):
175
+ """Converts an index (integer) in a token (str) using the vocab."""
176
+ return self.ids_to_tokens.get(index, self.unk_token)
177
+
178
+ def convert_tokens_to_string(self, tokens):
179
+ """Converts a sequence of tokens (string) in a single string."""
180
+ out_string = " ".join(tokens).replace(" ##", "").strip()
181
+ return out_string
182
+
183
+ def build_inputs_with_special_tokens(
184
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
185
+ ) -> List[int]:
186
+ """
187
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
188
+ adding special tokens. A Lxmert sequence has the following format:
189
+
190
+ - single sequence: `[CLS] X [SEP]`
191
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
192
+
193
+ Args:
194
+ token_ids_0 (`List[int]`):
195
+ List of IDs to which the special tokens will be added.
196
+ token_ids_1 (`List[int]`, *optional*):
197
+ Optional second list of IDs for sequence pairs.
198
+
199
+ Returns:
200
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
201
+ """
202
+ if token_ids_1 is None:
203
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
204
+ cls = [self.cls_token_id]
205
+ sep = [self.sep_token_id]
206
+ return cls + token_ids_0 + sep + token_ids_1 + sep
207
+
208
+ def get_special_tokens_mask(
209
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
210
+ ) -> List[int]:
211
+ """
212
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
213
+ special tokens using the tokenizer `prepare_for_model` method.
214
+
215
+ Args:
216
+ token_ids_0 (`List[int]`):
217
+ List of IDs.
218
+ token_ids_1 (`List[int]`, *optional*):
219
+ Optional second list of IDs for sequence pairs.
220
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the token list is already formatted with special tokens for the model.
222
+
223
+ Returns:
224
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
225
+ """
226
+
227
+ if already_has_special_tokens:
228
+ return super().get_special_tokens_mask(
229
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
230
+ )
231
+
232
+ if token_ids_1 is not None:
233
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
234
+ return [1] + ([0] * len(token_ids_0)) + [1]
235
+
236
+ def create_token_type_ids_from_sequences(
237
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
238
+ ) -> List[int]:
239
+ """
240
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
241
+ pair mask has the following format:
242
+
243
+ ```
244
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
245
+ | first sequence | second sequence |
246
+ ```
247
+
248
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
249
+
250
+ Args:
251
+ token_ids_0 (`List[int]`):
252
+ List of IDs.
253
+ token_ids_1 (`List[int]`, *optional*):
254
+ Optional second list of IDs for sequence pairs.
255
+
256
+ Returns:
257
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
258
+ """
259
+ sep = [self.sep_token_id]
260
+ cls = [self.cls_token_id]
261
+ if token_ids_1 is None:
262
+ return len(cls + token_ids_0 + sep) * [0]
263
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
264
+
265
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
266
+ index = 0
267
+ if os.path.isdir(save_directory):
268
+ vocab_file = os.path.join(
269
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
270
+ )
271
+ else:
272
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
273
+ with open(vocab_file, "w", encoding="utf-8") as writer:
274
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
275
+ if index != token_index:
276
+ logger.warning(
277
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
278
+ " Please check that the vocabulary is not corrupted!"
279
+ )
280
+ index = token_index
281
+ writer.write(token + "\n")
282
+ index += 1
283
+ return (vocab_file,)
284
+
285
+
286
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
287
+ class BasicTokenizer(object):
288
+ """
289
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
290
+
291
+ Args:
292
+ do_lower_case (`bool`, *optional*, defaults to `True`):
293
+ Whether or not to lowercase the input when tokenizing.
294
+ never_split (`Iterable`, *optional*):
295
+ Collection of tokens which will never be split during tokenization. Only has an effect when
296
+ `do_basic_tokenize=True`
297
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
298
+ Whether or not to tokenize Chinese characters.
299
+
300
+ This should likely be deactivated for Japanese (see this
301
+ [issue](https://github.com/huggingface/transformers/issues/328)).
302
+ strip_accents (`bool`, *optional*):
303
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
304
+ value for `lowercase` (as in the original BERT).
305
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
306
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
307
+ the full context of the words, such as contractions.
308
+ """
309
+
310
+ def __init__(
311
+ self,
312
+ do_lower_case=True,
313
+ never_split=None,
314
+ tokenize_chinese_chars=True,
315
+ strip_accents=None,
316
+ do_split_on_punc=True,
317
+ ):
318
+ if never_split is None:
319
+ never_split = []
320
+ self.do_lower_case = do_lower_case
321
+ self.never_split = set(never_split)
322
+ self.tokenize_chinese_chars = tokenize_chinese_chars
323
+ self.strip_accents = strip_accents
324
+ self.do_split_on_punc = do_split_on_punc
325
+
326
+ def tokenize(self, text, never_split=None):
327
+ """
328
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
329
+
330
+ Args:
331
+ never_split (`List[str]`, *optional*)
332
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
333
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
334
+ """
335
+ # union() returns a new set by concatenating the two sets.
336
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
337
+ text = self._clean_text(text)
338
+
339
+ # This was added on November 1st, 2018 for the multilingual and Chinese
340
+ # models. This is also applied to the English models now, but it doesn't
341
+ # matter since the English models were not trained on any Chinese data
342
+ # and generally don't have any Chinese data in them (there are Chinese
343
+ # characters in the vocabulary because Wikipedia does have some Chinese
344
+ # words in the English Wikipedia.).
345
+ if self.tokenize_chinese_chars:
346
+ text = self._tokenize_chinese_chars(text)
347
+ # prevents treating the same character with different unicode codepoints as different characters
348
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
349
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
350
+ split_tokens = []
351
+ for token in orig_tokens:
352
+ if token not in never_split:
353
+ if self.do_lower_case:
354
+ token = token.lower()
355
+ if self.strip_accents is not False:
356
+ token = self._run_strip_accents(token)
357
+ elif self.strip_accents:
358
+ token = self._run_strip_accents(token)
359
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
360
+
361
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
362
+ return output_tokens
363
+
364
+ def _run_strip_accents(self, text):
365
+ """Strips accents from a piece of text."""
366
+ text = unicodedata.normalize("NFD", text)
367
+ output = []
368
+ for char in text:
369
+ cat = unicodedata.category(char)
370
+ if cat == "Mn":
371
+ continue
372
+ output.append(char)
373
+ return "".join(output)
374
+
375
+ def _run_split_on_punc(self, text, never_split=None):
376
+ """Splits punctuation on a piece of text."""
377
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
378
+ return [text]
379
+ chars = list(text)
380
+ i = 0
381
+ start_new_word = True
382
+ output = []
383
+ while i < len(chars):
384
+ char = chars[i]
385
+ if _is_punctuation(char):
386
+ output.append([char])
387
+ start_new_word = True
388
+ else:
389
+ if start_new_word:
390
+ output.append([])
391
+ start_new_word = False
392
+ output[-1].append(char)
393
+ i += 1
394
+
395
+ return ["".join(x) for x in output]
396
+
397
+ def _tokenize_chinese_chars(self, text):
398
+ """Adds whitespace around any CJK character."""
399
+ output = []
400
+ for char in text:
401
+ cp = ord(char)
402
+ if self._is_chinese_char(cp):
403
+ output.append(" ")
404
+ output.append(char)
405
+ output.append(" ")
406
+ else:
407
+ output.append(char)
408
+ return "".join(output)
409
+
410
+ def _is_chinese_char(self, cp):
411
+ """Checks whether CP is the codepoint of a CJK character."""
412
+ # This defines a "chinese character" as anything in the CJK Unicode block:
413
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
414
+ #
415
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
416
+ # despite its name. The modern Korean Hangul alphabet is a different block,
417
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
418
+ # space-separated words, so they are not treated specially and handled
419
+ # like all of the other languages.
420
+ if (
421
+ (cp >= 0x4E00 and cp <= 0x9FFF)
422
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
423
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
424
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
425
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
426
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
427
+ or (cp >= 0xF900 and cp <= 0xFAFF)
428
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
429
+ ): #
430
+ return True
431
+
432
+ return False
433
+
434
+ def _clean_text(self, text):
435
+ """Performs invalid character removal and whitespace cleanup on text."""
436
+ output = []
437
+ for char in text:
438
+ cp = ord(char)
439
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
440
+ continue
441
+ if _is_whitespace(char):
442
+ output.append(" ")
443
+ else:
444
+ output.append(char)
445
+ return "".join(output)
446
+
447
+
448
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
449
+ class WordpieceTokenizer(object):
450
+ """Runs WordPiece tokenization."""
451
+
452
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
453
+ self.vocab = vocab
454
+ self.unk_token = unk_token
455
+ self.max_input_chars_per_word = max_input_chars_per_word
456
+
457
+ def tokenize(self, text):
458
+ """
459
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
460
+ tokenization using the given vocabulary.
461
+
462
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
463
+
464
+ Args:
465
+ text: A single token or whitespace separated tokens. This should have
466
+ already been passed through *BasicTokenizer*.
467
+
468
+ Returns:
469
+ A list of wordpiece tokens.
470
+ """
471
+
472
+ output_tokens = []
473
+ for token in whitespace_tokenize(text):
474
+ chars = list(token)
475
+ if len(chars) > self.max_input_chars_per_word:
476
+ output_tokens.append(self.unk_token)
477
+ continue
478
+
479
+ is_bad = False
480
+ start = 0
481
+ sub_tokens = []
482
+ while start < len(chars):
483
+ end = len(chars)
484
+ cur_substr = None
485
+ while start < end:
486
+ substr = "".join(chars[start:end])
487
+ if start > 0:
488
+ substr = "##" + substr
489
+ if substr in self.vocab:
490
+ cur_substr = substr
491
+ break
492
+ end -= 1
493
+ if cur_substr is None:
494
+ is_bad = True
495
+ break
496
+ sub_tokens.append(cur_substr)
497
+ start = end
498
+
499
+ if is_bad:
500
+ output_tokens.append(self.unk_token)
501
+ else:
502
+ output_tokens.extend(sub_tokens)
503
+ return output_tokens
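
The slow tokenizer above builds pair inputs as `[CLS] A [SEP] B [SEP]` and marks the second segment with token type id 1. A minimal usage sketch of how those helpers fit together, assuming the `unc-nlp/lxmert-base-uncased` checkpoint named in the `# Copied from` header is available (token ids depend on the downloaded vocabulary):

```python
from transformers import LxmertTokenizer

tok = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")

ids_a = tok.convert_tokens_to_ids(tok.tokenize("who is driving the car?"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("a man in a red shirt"))

pair_ids = tok.build_inputs_with_special_tokens(ids_a, ids_b)      # [CLS] A [SEP] B [SEP]
type_ids = tok.create_token_type_ids_from_sequences(ids_a, ids_b)  # 0s over segment A, 1s over segment B
mask = tok.get_special_tokens_mask(ids_a, ids_b)                   # 1 only at the [CLS]/[SEP] positions

assert len(pair_ids) == len(type_ids) == len(mask)
```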
venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py ADDED
@@ -0,0 +1,169 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from .tokenization_lxmert import LxmertTokenizer
23
+
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+
28
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert
29
+ class LxmertTokenizerFast(PreTrainedTokenizerFast):
30
+ r"""
31
+ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
32
+
33
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
34
+ refer to this superclass for more information regarding those methods.
35
+
36
+ Args:
37
+ vocab_file (`str`):
38
+ File containing the vocabulary.
39
+ do_lower_case (`bool`, *optional*, defaults to `True`):
40
+ Whether or not to lowercase the input when tokenizing.
41
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
42
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
43
+ token instead.
44
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
45
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
46
+ sequence classification or for a text and a question for question answering. It is also used as the last
47
+ token of a sequence built with special tokens.
48
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
49
+ The token used for padding, for example when batching sequences of different lengths.
50
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
51
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
52
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
53
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
54
+ The token used for masking values. This is the token used when training this model with masked language
55
+ modeling. This is the token which the model will try to predict.
56
+ clean_text (`bool`, *optional*, defaults to `True`):
57
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
58
+ whitespace characters with the classic one.
59
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
61
+ issue](https://github.com/huggingface/transformers/issues/328)).
62
+ strip_accents (`bool`, *optional*):
63
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
64
+ value for `lowercase` (as in the original Lxmert).
65
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
66
+ The prefix for subwords.
67
+ """
68
+
69
+ vocab_files_names = VOCAB_FILES_NAMES
70
+ slow_tokenizer_class = LxmertTokenizer
71
+
72
+ def __init__(
73
+ self,
74
+ vocab_file=None,
75
+ tokenizer_file=None,
76
+ do_lower_case=True,
77
+ unk_token="[UNK]",
78
+ sep_token="[SEP]",
79
+ pad_token="[PAD]",
80
+ cls_token="[CLS]",
81
+ mask_token="[MASK]",
82
+ tokenize_chinese_chars=True,
83
+ strip_accents=None,
84
+ **kwargs,
85
+ ):
86
+ super().__init__(
87
+ vocab_file,
88
+ tokenizer_file=tokenizer_file,
89
+ do_lower_case=do_lower_case,
90
+ unk_token=unk_token,
91
+ sep_token=sep_token,
92
+ pad_token=pad_token,
93
+ cls_token=cls_token,
94
+ mask_token=mask_token,
95
+ tokenize_chinese_chars=tokenize_chinese_chars,
96
+ strip_accents=strip_accents,
97
+ **kwargs,
98
+ )
99
+
100
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
101
+ if (
102
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
103
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
104
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
105
+ ):
106
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
107
+ normalizer_state["lowercase"] = do_lower_case
108
+ normalizer_state["strip_accents"] = strip_accents
109
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
110
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
111
+
112
+ self.do_lower_case = do_lower_case
113
+
114
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
115
+ """
116
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
117
+ adding special tokens. A Lxmert sequence has the following format:
118
+
119
+ - single sequence: `[CLS] X [SEP]`
120
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
121
+
122
+ Args:
123
+ token_ids_0 (`List[int]`):
124
+ List of IDs to which the special tokens will be added.
125
+ token_ids_1 (`List[int]`, *optional*):
126
+ Optional second list of IDs for sequence pairs.
127
+
128
+ Returns:
129
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
130
+ """
131
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
132
+
133
+ if token_ids_1 is not None:
134
+ output += token_ids_1 + [self.sep_token_id]
135
+
136
+ return output
137
+
138
+ def create_token_type_ids_from_sequences(
139
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
140
+ ) -> List[int]:
141
+ """
142
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
143
+ pair mask has the following format:
144
+
145
+ ```
146
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
147
+ | first sequence | second sequence |
148
+ ```
149
+
150
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
160
+ """
161
+ sep = [self.sep_token_id]
162
+ cls = [self.cls_token_id]
163
+ if token_ids_1 is None:
164
+ return len(cls + token_ids_0 + sep) * [0]
165
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
166
+
167
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
168
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
169
+ return tuple(files)
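
The fast tokenizer's `__init__` above re-synchronizes the serialized normalizer whenever `do_lower_case`, `strip_accents`, or `tokenize_chinese_chars` disagree with the state stored in `tokenizer.json`. A minimal sketch of the same pattern using the `tokenizers` library directly; the `tokenizer.json` path is a placeholder, and the sketch assumes a BertNormalizer-style normalizer whose serialized keys match its constructor (the same assumption the `__init__` above relies on):

```python
import json

from tokenizers import Tokenizer, normalizers

backend = Tokenizer.from_file("tokenizer.json")  # placeholder path to a WordPiece tokenizer file
state = json.loads(backend.normalizer.__getstate__())

# Rebuild the normalizer with lowercasing disabled but every other option preserved.
normalizer_cls = getattr(normalizers, state.pop("type"))
state["lowercase"] = False
backend.normalizer = normalizer_cls(**state)
```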
venv/lib/python3.10/site-packages/transformers/models/mistral/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mistral": ["MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MistralConfig"],
21
+ }
22
+
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_mistral"] = [
31
+ "MistralForCausalLM",
32
+ "MistralModel",
33
+ "MistralPreTrainedModel",
34
+ "MistralForSequenceClassification",
35
+ ]
36
+
37
+ try:
38
+ if not is_flax_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_flax_mistral"] = [
44
+ "FlaxMistralForCausalLM",
45
+ "FlaxMistralModel",
46
+ "FlaxMistralPreTrainedModel",
47
+ ]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_mistral import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MistralConfig
52
+
53
+ try:
54
+ if not is_torch_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .modeling_mistral import (
60
+ MistralForCausalLM,
61
+ MistralForSequenceClassification,
62
+ MistralModel,
63
+ MistralPreTrainedModel,
64
+ )
65
+
66
+ try:
67
+ if not is_flax_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_flax_mistral import (
73
+ FlaxMistralForCausalLM,
74
+ FlaxMistralModel,
75
+ FlaxMistralPreTrainedModel,
76
+ )
77
+
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
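
Because of the lazy import structure above, the configuration is always importable, while the PyTorch and Flax model classes only resolve when their backends are installed. A small sketch under that assumption (the tiny config values below are arbitrary and purely for illustration):

```python
from transformers.models.mistral import MistralConfig

config = MistralConfig(  # arbitrary small values, not a recommended setup
    hidden_size=256,
    intermediate_size=512,
    num_hidden_layers=2,
    num_attention_heads=8,
    num_key_value_heads=4,
)

try:
    from transformers.models.mistral import MistralForCausalLM  # requires torch
    model = MistralForCausalLM(config)  # tiny randomly initialized model
except ImportError:
    model = None  # torch is not available; only the configuration symbols are exposed
```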
venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.22 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/configuration_mistral.cpython-310.pyc ADDED
Binary file (6.07 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/convert_mistral_weights_to_hf.cpython-310.pyc ADDED
Binary file (7.32 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_flax_mistral.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_mistral.cpython-310.pyc ADDED
Binary file (39.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mistral/configuration_mistral.py ADDED
@@ -0,0 +1,150 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Mistral model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class MistralConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
30
+ Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
32
+
33
+ [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
34
+ [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 32000):
42
+ Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`MistralModel`]
44
+ hidden_size (`int`, *optional*, defaults to 4096):
45
+ Dimension of the hidden representations.
46
+ intermediate_size (`int`, *optional*, defaults to 14336):
47
+ Dimension of the MLP representations.
48
+ num_hidden_layers (`int`, *optional*, defaults to 32):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 32):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ num_key_value_heads (`int`, *optional*, defaults to 8):
53
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
54
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
55
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
56
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
57
+ by meanpooling all the original heads within that group. For more details checkout [this
58
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
60
+ The non-linear activation function (function or string) in the decoder.
61
+ max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
62
+ The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
63
+ allows sequences of up to 4096*32 tokens.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
67
+ The epsilon used by the rms normalization layers.
68
+ use_cache (`bool`, *optional*, defaults to `True`):
69
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
70
+ relevant if `config.is_decoder=True`.
71
+ pad_token_id (`int`, *optional*):
72
+ The id of the padding token.
73
+ bos_token_id (`int`, *optional*, defaults to 1):
74
+ The id of the "beginning-of-sequence" token.
75
+ eos_token_id (`int`, *optional*, defaults to 2):
76
+ The id of the "end-of-sequence" token.
77
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
78
+ Whether the model's input and output word embeddings should be tied.
79
+ rope_theta (`float`, *optional*, defaults to 10000.0):
80
+ The base period of the RoPE embeddings.
81
+ sliding_window (`int`, *optional*, defaults to 4096):
82
+ Sliding window attention window size. If not specified, will default to `4096`.
83
+ attention_dropout (`float`, *optional*, defaults to 0.0):
84
+ The dropout ratio for the attention probabilities.
85
+
86
+ ```python
87
+ >>> from transformers import MistralModel, MistralConfig
88
+
89
+ >>> # Initializing a Mistral 7B style configuration
90
+ >>> configuration = MistralConfig()
91
+
92
+ >>> # Initializing a model from the Mistral 7B style configuration
93
+ >>> model = MistralModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "mistral"
100
+ keys_to_ignore_at_inference = ["past_key_values"]
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_size=32000,
105
+ hidden_size=4096,
106
+ intermediate_size=14336,
107
+ num_hidden_layers=32,
108
+ num_attention_heads=32,
109
+ num_key_value_heads=8,
110
+ hidden_act="silu",
111
+ max_position_embeddings=4096 * 32,
112
+ initializer_range=0.02,
113
+ rms_norm_eps=1e-6,
114
+ use_cache=True,
115
+ pad_token_id=None,
116
+ bos_token_id=1,
117
+ eos_token_id=2,
118
+ tie_word_embeddings=False,
119
+ rope_theta=10000.0,
120
+ sliding_window=4096,
121
+ attention_dropout=0.0,
122
+ **kwargs,
123
+ ):
124
+ self.vocab_size = vocab_size
125
+ self.max_position_embeddings = max_position_embeddings
126
+ self.hidden_size = hidden_size
127
+ self.intermediate_size = intermediate_size
128
+ self.num_hidden_layers = num_hidden_layers
129
+ self.num_attention_heads = num_attention_heads
130
+ self.sliding_window = sliding_window
131
+
132
+ # for backward compatibility
133
+ if num_key_value_heads is None:
134
+ num_key_value_heads = num_attention_heads
135
+
136
+ self.num_key_value_heads = num_key_value_heads
137
+ self.hidden_act = hidden_act
138
+ self.initializer_range = initializer_range
139
+ self.rms_norm_eps = rms_norm_eps
140
+ self.use_cache = use_cache
141
+ self.rope_theta = rope_theta
142
+ self.attention_dropout = attention_dropout
143
+
144
+ super().__init__(
145
+ pad_token_id=pad_token_id,
146
+ bos_token_id=bos_token_id,
147
+ eos_token_id=eos_token_id,
148
+ tie_word_embeddings=tie_word_embeddings,
149
+ **kwargs,
150
+ )
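
As the docstring notes, `num_key_value_heads` switches the attention flavour: equal to `num_attention_heads` gives multi-head attention, 1 gives multi-query attention, and anything in between gives grouped-query attention. A short illustration using the class defaults (32 query heads, 8 key/value heads):

```python
from transformers import MistralConfig

gqa = MistralConfig()                                                 # defaults: grouped-query attention
mha = MistralConfig(num_attention_heads=32, num_key_value_heads=32)  # multi-head attention
mqa = MistralConfig(num_attention_heads=32, num_key_value_heads=1)   # multi-query attention

# Number of query heads sharing each key/value head under GQA:
print(gqa.num_attention_heads // gqa.num_key_value_heads)  # -> 4
```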
venv/lib/python3.10/site-packages/transformers/models/mistral/convert_mistral_weights_to_hf.py ADDED
@@ -0,0 +1,276 @@
1
+ # Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import gc
16
+ import json
17
+ import os
18
+ import shutil
19
+ import warnings
20
+
21
+ import torch
22
+
23
+ from transformers import (
24
+ LlamaTokenizer,
25
+ MistralConfig,
26
+ MistralForCausalLM,
27
+ )
28
+
29
+
30
+ try:
31
+ from transformers import LlamaTokenizerFast
32
+
33
+ tokenizer_class = LlamaTokenizerFast
34
+ except ImportError as e:
35
+ warnings.warn(e)
36
+ warnings.warn(
37
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
38
+ )
39
+ tokenizer_class = LlamaTokenizer
40
+
41
+ """
42
+ Sample usage:
43
+
44
+ ```
45
+ python src/transformers/models/mistral/convert_mistral_weights_to_hf.py \
46
+ --input_dir /path/to/downloaded/mistral/weights --model_size 7B --output_dir /output/path
47
+ ```
48
+
49
+ Thereafter, models can be loaded via:
50
+
51
+ ```py
52
+ from transformers import MistralForCausalLM, LlamaTokenizer
53
+
54
+ model = MistralForCausalLM.from_pretrained("/output/path")
55
+ tokenizer = LlamaTokenizer.from_pretrained("/output/path")
56
+ ```
57
+
58
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
59
+ come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
60
+ """
61
+
62
+ NUM_SHARDS = {"7B": 1}
63
+
64
+
65
+ def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
66
+ return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
67
+
68
+
69
+ def read_json(path):
70
+ with open(path, "r") as f:
71
+ return json.load(f)
72
+
73
+
74
+ def write_json(text, path):
75
+ with open(path, "w") as f:
76
+ json.dump(text, f)
77
+
78
+
79
+ def write_model(model_path, input_base_path, model_size, tokenizer_path=None, safe_serialization=True):
80
+ # for backward compatibility, before you needed the repo to be called `my_repo/model_size`
81
+ if not os.path.isfile(os.path.join(input_base_path, "params.json")):
82
+ input_base_path = os.path.join(input_base_path, model_size)
83
+
84
+ os.makedirs(model_path, exist_ok=True)
85
+ tmp_model_path = os.path.join(model_path, "tmp")
86
+ os.makedirs(tmp_model_path, exist_ok=True)
87
+
88
+ params = read_json(os.path.join(input_base_path, "params.json"))
89
+ num_shards = NUM_SHARDS[model_size]
90
+
91
+ # For some reason this is a string in the params.json
92
+ sliding_window = int(params["sliding_window"])
93
+ n_layers = params["n_layers"]
94
+ n_heads = params["n_heads"]
95
+ n_heads_per_shard = n_heads // num_shards
96
+ dim = params["dim"]
97
+ dims_per_head = dim // n_heads
98
+ base = params.get("rope_theta", 10000.0)
99
+ inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
100
+ max_position_embeddings = 4096 * 8
101
+
102
+ if tokenizer_path is not None:
103
+ tokenizer = tokenizer_class(tokenizer_path)
104
+ tokenizer.save_pretrained(model_path)
105
+ vocab_size = tokenizer.vocab_size if tokenizer_path is not None else 32000
106
+
107
+ if "n_kv_heads" in params:
108
+ num_key_value_heads = params["n_kv_heads"] # for GQA / MQA
109
+ num_local_key_value_heads = num_key_value_heads // num_shards
110
+ key_value_dim = dims_per_head * num_local_key_value_heads
111
+ else: # compatibility with other checkpoints
112
+ num_key_value_heads = n_heads
113
+ num_local_key_value_heads = n_heads_per_shard
114
+ key_value_dim = dim
115
+
116
+ # permute for sliced rotary
117
+ def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
118
+ return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
119
+
120
+ print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
121
+ # Load weights
122
+ loaded = [
123
+ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
124
+ for i in range(num_shards)
125
+ ]
126
+ param_count = 0
127
+ index_dict = {"weight_map": {}}
128
+ for layer_i in range(n_layers):
129
+ filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
130
+
131
+ # Sharded
132
+ # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
133
+ # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
134
+ # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
135
+
136
+ state_dict = {
137
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
138
+ f"layers.{layer_i}.attention_norm.weight"
139
+ ].clone(),
140
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
141
+ f"layers.{layer_i}.ffn_norm.weight"
142
+ ].clone(),
143
+ }
144
+ state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
145
+ torch.cat(
146
+ [
147
+ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
148
+ for i in range(num_shards)
149
+ ],
150
+ dim=0,
151
+ ).reshape(dim, dim)
152
+ )
153
+ state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
154
+ torch.cat(
155
+ [
156
+ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
157
+ num_local_key_value_heads, dims_per_head, dim
158
+ )
159
+ for i in range(num_shards)
160
+ ],
161
+ dim=0,
162
+ ).reshape(key_value_dim, dim),
163
+ num_key_value_heads,
164
+ key_value_dim,
165
+ dim,
166
+ )
167
+ state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
168
+ [
169
+ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(num_local_key_value_heads, dims_per_head, dim)
170
+ for i in range(num_shards)
171
+ ],
172
+ dim=0,
173
+ ).reshape(key_value_dim, dim)
174
+
175
+ state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
176
+ [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
177
+ )
178
+ state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
179
+ [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
180
+ )
181
+ state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
182
+ [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
183
+ )
184
+ state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
185
+ [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
186
+ )
187
+
188
+ state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
189
+ for k, v in state_dict.items():
190
+ index_dict["weight_map"][k] = filename
191
+ param_count += v.numel()
192
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
193
+
194
+ filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
195
+ state_dict = {
196
+ "model.norm.weight": loaded[0]["norm.weight"],
197
+ "model.embed_tokens.weight": torch.cat([loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1),
198
+ "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
199
+ }
200
+
201
+ for k, v in state_dict.items():
202
+ index_dict["weight_map"][k] = filename
203
+ param_count += v.numel()
204
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
205
+
206
+ # Write configs
207
+ index_dict["metadata"] = {"total_size": param_count * 2}
208
+ write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
209
+ config = MistralConfig(
210
+ hidden_size=dim,
211
+ intermediate_size=params["hidden_dim"],
212
+ num_attention_heads=params["n_heads"],
213
+ num_hidden_layers=params["n_layers"],
214
+ rms_norm_eps=params["norm_eps"],
215
+ num_key_value_heads=num_key_value_heads,
216
+ vocab_size=vocab_size,
217
+ rope_theta=base,
218
+ max_position_embeddings=max_position_embeddings,
219
+ sliding_window=sliding_window,
220
+ )
221
+ config.save_pretrained(tmp_model_path)
222
+
223
+ # Make space so we can load the model properly now.
224
+ del state_dict
225
+ del loaded
226
+ gc.collect()
227
+
228
+ print("Loading the checkpoint in a Mistral model.")
229
+ model = MistralForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
230
+ # Avoid saving this as part of the config.
231
+ del model.config._name_or_path
232
+ model.config.torch_dtype = torch.float16
233
+ print("Saving in the Transformers format.")
234
+ model.save_pretrained(model_path, safe_serialization=safe_serialization)
235
+ shutil.rmtree(tmp_model_path)
236
+
237
+
238
+ def write_tokenizer(tokenizer_path, input_tokenizer_path):
239
+ # Initialize the tokenizer based on the `spm` model
240
+ print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
241
+ tokenizer = tokenizer_class(input_tokenizer_path)
242
+ tokenizer.save_pretrained(tokenizer_path)
243
+
244
+
245
+ def main():
246
+ parser = argparse.ArgumentParser()
247
+ parser.add_argument(
248
+ "--input_dir",
249
+ help="Location of Mistral weights, which contains tokenizer.model and model folders",
250
+ )
251
+ parser.add_argument(
252
+ "--model_size",
253
+ choices=["7B", "tokenizer_only"],
254
+ help="'f' models correspond to the finetuned versions, and are specific to the Mistral2 official release. For more details on Mistral2, checkout the original repo: https://huggingface.co/meta-mistral",
255
+ )
256
+ parser.add_argument(
257
+ "--output_dir",
258
+ help="Location to write HF model and tokenizer",
259
+ )
260
+ parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
261
+ args = parser.parse_args()
262
+ spm_path = os.path.join(args.input_dir, "tokenizer.model")
263
+ if args.model_size != "tokenizer_only":
264
+ write_model(
265
+ model_path=args.output_dir,
266
+ input_base_path=args.input_dir,
267
+ model_size=args.model_size,
268
+ safe_serialization=args.safe_serialization,
269
+ tokenizer_path=spm_path,
270
+ )
271
+ else:
272
+ write_tokenizer(args.output_dir, spm_path)
273
+
274
+
275
+ if __name__ == "__main__":
276
+ main()
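
The `permute` helper defined inside `write_model` above only reorders rows of the q/k projection matrices so that the checkpoint's interleaved rotary layout matches the half-split layout used by the Transformers modeling code (the script's own comment calls this "permute for sliced rotary"); no values are changed. A tiny numeric check of that claim, with arbitrarily chosen toy shapes:

```python
import torch

n_heads, head_dim, in_dim = 2, 4, 8  # arbitrary toy sizes
dim1 = n_heads * head_dim

def permute(w, n_heads=n_heads, dim1=dim1, dim2=in_dim):
    # Same expression as in write_model above.
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

w = torch.arange(dim1 * in_dim, dtype=torch.float32).reshape(dim1, in_dim)
p = permute(w)

# The permutation keeps every weight value; it just reorders rows within each head.
assert torch.equal(p.flatten().sort().values, w.flatten().sort().values)
assert not torch.equal(p, w)
```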
venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_flax_mistral.py ADDED
@@ -0,0 +1,741 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Mistral AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax Mistral model."""
16
+ from typing import Optional, Tuple
17
+
18
+ import flax.linen as nn
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
23
+ from flax.linen import combine_masks, make_causal_mask
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import (
29
+ FlaxBaseModelOutput,
30
+ FlaxBaseModelOutputWithPast,
31
+ FlaxCausalLMOutput,
32
+ FlaxCausalLMOutputWithCrossAttentions,
33
+ )
34
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, logging
35
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
36
+ from .configuration_mistral import MistralConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CONFIG_FOR_DOC = "MistralConfig"
42
+ _REAL_CHECKPOINT_FOR_DOC = "mistralai/Mistral-7B-v0.1"
43
+ _CHECKPOINT_FOR_DOC = "ksmcg/Mistral-tiny"
44
+
45
+ MISTRAL_START_DOCSTRING = r"""
46
+
47
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
48
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
49
+ etc.)
50
+
51
+ This model is also a Flax Linen
52
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
53
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
54
+
55
+ Finally, this model supports inherent JAX features such as:
56
+
57
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
58
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
59
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
60
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
61
+
62
+ Parameters:
63
+ config ([`MistralConfig`]): Model configuration class with all the parameters of the model.
64
+ Initializing with a config file does not load the weights associated with the model, only the
65
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
66
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
67
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or
68
+ `jax.numpy.bfloat16`.
69
+
70
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
71
+ specified all the computation will be performed with the given `dtype`.
72
+
73
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
74
+ parameters.**
75
+
76
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
77
+ [`~FlaxPreTrainedModel.to_bf16`].
78
+ """
79
+
80
+ MISTRAL_INPUTS_DOCSTRING = r"""
81
+ Args:
82
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
83
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
84
+ it.
85
+
86
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
87
+ [`PreTrainedTokenizer.__call__`] for details.
88
+
89
+ [What are input IDs?](../glossary#input-ids)
90
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
91
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
92
+
93
+ - 1 for tokens that are **not masked**,
94
+ - 0 for tokens that are **masked**.
95
+
96
+ [What are attention masks?](../glossary#attention-mask)
97
+
98
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
99
+ [`PreTrainedTokenizer.__call__`] for details.
100
+
101
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
102
+ `past_key_values`).
103
+
104
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
105
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
106
+ information on the default strategy.
107
+
108
+ - 1 indicates the head is **not masked**,
109
+ - 0 indicates the head is **masked**.
110
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
111
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
112
+ config.n_positions - 1]`.
113
+
114
+ [What are position IDs?](../glossary#position-ids)
115
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
116
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
117
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
118
+ output_attentions (`bool`, *optional*):
119
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
120
+ tensors for more detail.
121
+ output_hidden_states (`bool`, *optional*):
122
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
123
+ more detail.
124
+ return_dict (`bool`, *optional*):
125
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
126
+ """
127
+
128
+
129
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaRMSNorm with Llama->Mistral
130
+ class FlaxMistralRMSNorm(nn.Module):
131
+ config: MistralConfig
132
+ dtype: jnp.dtype = jnp.float32
133
+
134
+ def setup(self):
135
+ self.epsilon = self.config.rms_norm_eps
136
+ self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size)
137
+
138
+ def __call__(self, hidden_states):
139
+ variance = jnp.asarray(hidden_states, dtype=jnp.float32)
140
+ variance = jnp.power(variance, 2)
141
+ variance = variance.mean(-1, keepdims=True)
142
+ # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt`
143
+ hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon)
144
+
145
+ return self.weight * jnp.asarray(hidden_states, dtype=self.dtype)
146
+
147
+
148
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaRotaryEmbedding with Llama->Mistral
149
+ class FlaxMistralRotaryEmbedding(nn.Module):
150
+ config: MistralConfig
151
+ dtype: jnp.dtype = jnp.float32
152
+
153
+ def setup(self):
154
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
155
+ self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim)
156
+
157
+ def __call__(self, key, query, position_ids):
158
+ sincos = self.sincos[position_ids]
159
+ sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1)
160
+
161
+ key = apply_rotary_pos_emb(key, sin_pos, cos_pos)
162
+ query = apply_rotary_pos_emb(query, sin_pos, cos_pos)
163
+
164
+ key = jnp.asarray(key, dtype=self.dtype)
165
+ query = jnp.asarray(query, dtype=self.dtype)
166
+
167
+ return key, query
168
+
169
+
170
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaMLP with Llama->Mistral
171
+ class FlaxMistralMLP(nn.Module):
172
+ config: MistralConfig
173
+ dtype: jnp.dtype = jnp.float32
174
+
175
+ def setup(self):
176
+ embed_dim = self.config.hidden_size
177
+ inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim
178
+
179
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
180
+ self.act = ACT2FN[self.config.hidden_act]
181
+
182
+ self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
183
+ self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
184
+ self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
185
+
186
+ def __call__(self, hidden_states):
187
+ up_proj_states = self.up_proj(hidden_states)
188
+ gate_states = self.act(self.gate_proj(hidden_states))
189
+
190
+ hidden_states = self.down_proj(up_proj_states * gate_states)
191
+ return hidden_states
192
+
193
+
194
+ # Copied from transformers.models.llama.modeling_flax_llama.apply_rotary_pos_emb
195
+ def apply_rotary_pos_emb(tensor, sin_pos, cos_pos):
196
+ return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos)
197
+
198
+
199
+ # Copied from transformers.models.llama.modeling_flax_llama.create_sinusoidal_positions
200
+ def create_sinusoidal_positions(num_pos, dim):
201
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
202
+ freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
203
+
204
+ emb = np.concatenate((freqs, freqs), axis=-1)
205
+ out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1)
206
+ return jnp.array(out[:, :, :num_pos])
207
+
208
+
209
+ # Copied from transformers.models.llama.modeling_flax_llama.rotate_half
210
+ def rotate_half(tensor):
211
+ """Rotates half the hidden dims of the input."""
212
+ rotate_half_tensor = jnp.concatenate(
213
+ (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1
214
+ )
215
+ return rotate_half_tensor
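# --- editor's sketch (illustrative, not part of the committed file) ---
# apply_rotary_pos_emb / rotate_half above pair dimension i with dimension
# i + dim // 2 and rotate each pair by a position-dependent angle, so the
# norm of every pair is preserved. A quick NumPy check with a single angle
# (all names below are local to this sketch):
import numpy as np

def _rotate_half_np(x):
    half = x.shape[-1] // 2
    return np.concatenate((-x[..., half:], x[..., :half]), axis=-1)

dim, theta = 8, 0.3
x = np.random.randn(dim)
rotated = x * np.cos(theta) + _rotate_half_np(x) * np.sin(theta)

pairs = lambda v: np.stack((v[: dim // 2], v[dim // 2 :]), axis=-1)
assert np.allclose(np.linalg.norm(pairs(x), axis=-1),
                   np.linalg.norm(pairs(rotated), axis=-1))
# -----------------------------------------------------------------------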
216
+
217
+
218
+ class FlaxMistralAttention(nn.Module):
219
+ config: MistralConfig
220
+ dtype: jnp.dtype = jnp.float32
221
+
222
+ def setup(self):
223
+ config = self.config
224
+ self.hidden_size = config.hidden_size
225
+ self.num_heads = config.num_attention_heads
226
+ self.head_dim = self.hidden_size // self.num_heads
227
+ self.num_key_value_heads = config.num_key_value_heads
228
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
229
+ self.max_position_embeddings = config.max_position_embeddings
230
+ self.attention_softmax_in_fp32 = self.dtype is not jnp.float32
231
+ self.rope_theta = config.rope_theta
232
+ if (self.head_dim * self.num_heads) != self.hidden_size:
233
+ raise ValueError(
234
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
235
+ f" and `num_heads`: {self.num_heads})."
236
+ )
237
+ self.q_proj = nn.Dense(self.num_heads * self.head_dim, use_bias=False, dtype=self.dtype)
238
+ self.k_proj = nn.Dense(self.num_key_value_heads * self.head_dim, use_bias=False, dtype=self.dtype)
239
+ self.v_proj = nn.Dense(self.num_key_value_heads * self.head_dim, use_bias=False, dtype=self.dtype)
240
+ self.o_proj = nn.Dense(self.hidden_size, use_bias=False, dtype=self.dtype)
241
+ causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
242
+ self.causal_mask = jnp.triu(causal_mask, k=-config.sliding_window)
243
+ self.rotary_emb = FlaxMistralRotaryEmbedding(config, dtype=self.dtype)
244
+
245
+ def _split_heads(self, hidden_states, num_heads):
246
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
247
+
248
+ def _merge_heads(self, hidden_states):
249
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))
250
+
251
+ @nn.compact
252
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache
253
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
254
+ """
255
+ This function takes projected key, value states from a single input token and concatenates the states to cached
256
+ states from previous steps. This function is slightly adapted from the official Flax repository:
257
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
258
+ """
259
+ # detect if we're initializing by absence of existing cache data.
260
+ is_initialized = self.has_variable("cache", "cached_key")
261
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
262
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
263
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
264
+
265
+ if is_initialized:
266
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
267
+ # update key, value caches with our new 1d spatial slices
268
+ cur_index = cache_index.value
269
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
270
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
271
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
272
+ cached_key.value = key
273
+ cached_value.value = value
274
+ num_updated_cache_vectors = query.shape[1]
275
+ cache_index.value = cache_index.value + num_updated_cache_vectors
276
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
277
+ pad_mask = jnp.broadcast_to(
278
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
279
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
280
+ )
281
+ attention_mask = combine_masks(pad_mask, attention_mask)
282
+ return key, value, attention_mask
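# --- editor's sketch (illustrative, not part of the committed file) ---
# The cache above is a preallocated (batch, max_length, heads, head_dim) buffer
# updated in place at `cache_index`, mirroring lax.dynamic_update_slice.
# A NumPy analogue with hypothetical sizes:
import numpy as np

max_length, heads, head_dim = 8, 2, 4
cached_key = np.zeros((1, max_length, heads, head_dim))
cache_index = 3
new_key = np.random.randn(1, 1, heads, head_dim)       # one freshly decoded token
cached_key[:, cache_index : cache_index + 1] = new_key
cache_index += new_key.shape[1]                        # advance by the tokens written
# -----------------------------------------------------------------------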
283
+
284
+ def __call__(
285
+ self,
286
+ hidden_states: jnp.ndarray,
287
+ attention_mask: Optional[jnp.ndarray] = None,
288
+ position_ids: Optional[jnp.ndarray] = None,
289
+ deterministic: bool = True,
290
+ output_attentions: bool = False,
291
+ init_cache: bool = False,
292
+ ) -> Tuple[jnp.ndarray, jnp.ndarray]:
293
+ query_states = self.q_proj(hidden_states)
294
+ key_states = self.k_proj(hidden_states)
295
+ value_states = self.v_proj(hidden_states)
296
+
297
+ query_states = self._split_heads(query_states, self.num_heads)
298
+ key_states = self._split_heads(key_states, self.num_key_value_heads)
299
+ value_states = self._split_heads(value_states, self.num_key_value_heads)
300
+
301
+ key_states, query_states = self.rotary_emb(key_states, query_states, position_ids)
302
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
303
+ if self.has_variable("cache", "cached_key"):
304
+ mask_shift = self.variables["cache"]["cache_index"]
305
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
306
+ causal_mask = lax.dynamic_slice(
307
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
308
+ )
309
+ else:
310
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
311
+
312
+ batch_size = hidden_states.shape[0]
313
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
314
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
315
+ attention_mask = combine_masks(attention_mask, causal_mask)
316
+
317
+ if self.has_variable("cache", "cached_key") or init_cache:
318
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
319
+ key_states, value_states, query_states, attention_mask
320
+ )
321
+ key_states = jnp.repeat(key_states, self.num_key_value_groups, axis=2)
322
+ value_states = jnp.repeat(value_states, self.num_key_value_groups, axis=2)
323
+
324
+ attention_bias = lax.select(
325
+ attention_mask > 0,
326
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
327
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
328
+ )
329
+
330
+ # usual dot product attention
331
+ attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype
332
+ attn_weights = dot_product_attention_weights(
333
+ query_states,
334
+ key_states,
335
+ bias=attention_bias,
336
+ deterministic=deterministic,
337
+ dropout_rate=self.config.attention_dropout,
338
+ dtype=attention_dtype,
339
+ )
340
+
341
+ if self.attention_softmax_in_fp32:
342
+ attn_weights = attn_weights.astype(self.dtype)
343
+
344
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
345
+ attn_output = self._merge_heads(attn_output)
346
+ attn_output = self.o_proj(attn_output)
347
+
348
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
349
+ return outputs
350
+
351
+
352
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaDecoderLayer with Llama->Mistral
353
+ class FlaxMistralDecoderLayer(nn.Module):
354
+ config: MistralConfig
355
+ dtype: jnp.dtype = jnp.float32
356
+
357
+ def setup(self):
358
+ self.input_layernorm = FlaxMistralRMSNorm(self.config, dtype=self.dtype)
359
+ self.self_attn = FlaxMistralAttention(self.config, dtype=self.dtype)
360
+ self.post_attention_layernorm = FlaxMistralRMSNorm(self.config, dtype=self.dtype)
361
+ self.mlp = FlaxMistralMLP(self.config, dtype=self.dtype)
362
+
363
+ def __call__(
364
+ self,
365
+ hidden_states,
366
+ attention_mask=None,
367
+ position_ids=None,
368
+ deterministic: bool = True,
369
+ init_cache: bool = False,
370
+ output_attentions: bool = False,
371
+ ):
372
+ residual = hidden_states
373
+ hidden_states = self.input_layernorm(hidden_states)
374
+ outputs = self.self_attn(
375
+ hidden_states,
376
+ attention_mask=attention_mask,
377
+ position_ids=position_ids,
378
+ deterministic=deterministic,
379
+ init_cache=init_cache,
380
+ output_attentions=output_attentions,
381
+ )
382
+ # residual connection
383
+ attn_output = outputs[0]
384
+ hidden_states = residual + attn_output
385
+
386
+ residual = hidden_states
387
+ hidden_states = self.post_attention_layernorm(hidden_states)
388
+ hidden_states = self.mlp(hidden_states)
389
+ # residual connection
390
+ hidden_states = residual + hidden_states
391
+
392
+ return (hidden_states,) + outputs[1:]
393
+
394
+
395
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Mistral, GPT_NEO->MISTRAL, transformer->model
396
+ class FlaxMistralPreTrainedModel(FlaxPreTrainedModel):
397
+ """
398
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
399
+ models.
400
+ """
401
+
402
+ config_class = MistralConfig
403
+ base_model_prefix = "model"
404
+ module_class: nn.Module = None
405
+
406
+ def __init__(
407
+ self,
408
+ config: MistralConfig,
409
+ input_shape: Tuple = (1, 1),
410
+ seed: int = 0,
411
+ dtype: jnp.dtype = jnp.float32,
412
+ _do_init: bool = True,
413
+ **kwargs,
414
+ ):
415
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
416
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
417
+
418
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
419
+ # init input tensors
420
+ input_ids = jnp.zeros(input_shape, dtype="i4")
421
+ attention_mask = jnp.ones_like(input_ids)
422
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
423
+ params_rng, dropout_rng = jax.random.split(rng)
424
+ rngs = {"params": params_rng, "dropout": dropout_rng}
425
+
426
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
427
+
428
+ if params is not None:
429
+ random_params = flatten_dict(unfreeze(random_params))
430
+ params = flatten_dict(unfreeze(params))
431
+ for missing_key in self._missing_keys:
432
+ params[missing_key] = random_params[missing_key]
433
+ self._missing_keys = set()
434
+ return freeze(unflatten_dict(params))
435
+ else:
436
+ return random_params
437
+
438
+ def init_cache(self, batch_size, max_length):
439
+ r"""
440
+ Args:
441
+ batch_size (`int`):
442
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
443
+ max_length (`int`):
444
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
445
+ cache.
446
+ """
447
+ # init input variables to retrieve cache
448
+ input_ids = jnp.ones((batch_size, max_length))
449
+ attention_mask = jnp.ones_like(input_ids)
450
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
451
+
452
+ init_variables = self.module.init(
453
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
454
+ )
455
+ return unfreeze(init_variables["cache"])
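# --- editor's sketch (illustrative, not part of the committed file) ---
# Typical usage is to build the cache once before auto-regressive decoding and
# thread it back in as `past_key_values`; the checkpoint id below is only an
# example and the calls are left commented out because they download weights:
#
#   from transformers import FlaxMistralForCausalLM
#   model = FlaxMistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", from_pt=True)
#   past_key_values = model.init_cache(batch_size=1, max_length=128)
#   outputs = model(input_ids, attention_mask=attention_mask,
#                   position_ids=position_ids, past_key_values=past_key_values)
# -----------------------------------------------------------------------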
456
+
457
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
458
+ def __call__(
459
+ self,
460
+ input_ids,
461
+ attention_mask=None,
462
+ position_ids=None,
463
+ params: dict = None,
464
+ past_key_values: dict = None,
465
+ dropout_rng: jax.random.PRNGKey = None,
466
+ train: bool = False,
467
+ output_attentions: Optional[bool] = None,
468
+ output_hidden_states: Optional[bool] = None,
469
+ return_dict: Optional[bool] = None,
470
+ ):
471
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
472
+ output_hidden_states = (
473
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
474
+ )
475
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
476
+
477
+ batch_size, sequence_length = input_ids.shape
478
+
479
+ if position_ids is None:
480
+ if past_key_values is not None:
481
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
482
+
483
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
484
+
485
+ if attention_mask is None:
486
+ attention_mask = jnp.ones((batch_size, sequence_length))
487
+
488
+ # Handle any PRNG if needed
489
+ rngs = {}
490
+ if dropout_rng is not None:
491
+ rngs["dropout"] = dropout_rng
492
+
493
+ inputs = {"params": params or self.params}
494
+
495
+ # If past_key_values are passed, then the cache is already initialized and a private flag init_cache has to be passed down to ensure the cache is used. We also have to make sure the cache is marked as mutable so that it can be changed by the FlaxMistralAttention module.
496
+ if past_key_values:
497
+ inputs["cache"] = past_key_values
498
+ mutable = ["cache"]
499
+ else:
500
+ mutable = False
501
+
502
+ outputs = self.module.apply(
503
+ inputs,
504
+ jnp.array(input_ids, dtype="i4"),
505
+ jnp.array(attention_mask, dtype="i4"),
506
+ jnp.array(position_ids, dtype="i4"),
507
+ not train,
508
+ False,
509
+ output_attentions,
510
+ output_hidden_states,
511
+ return_dict,
512
+ rngs=rngs,
513
+ mutable=mutable,
514
+ )
515
+
516
+ # add updated cache to model output
517
+ if past_key_values is not None and return_dict:
518
+ outputs, past_key_values = outputs
519
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
520
+ return outputs
521
+ elif past_key_values is not None and not return_dict:
522
+ outputs, past_key_values = outputs
523
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
524
+
525
+ return outputs
526
+
527
+
528
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaLayerCollection with Llama->Mistral
529
+ class FlaxMistralLayerCollection(nn.Module):
530
+ config: MistralConfig
531
+ dtype: jnp.dtype = jnp.float32
532
+
533
+ def setup(self):
534
+ self.blocks = [
535
+ FlaxMistralDecoderLayer(self.config, dtype=self.dtype, name=str(i))
536
+ for i in range(self.config.num_hidden_layers)
537
+ ]
538
+
539
+ def __call__(
540
+ self,
541
+ hidden_states,
542
+ attention_mask=None,
543
+ position_ids=None,
544
+ deterministic: bool = True,
545
+ init_cache: bool = False,
546
+ output_attentions: bool = False,
547
+ output_hidden_states: bool = False,
548
+ return_dict: bool = False,
549
+ ):
550
+ all_attentions = () if output_attentions else None
551
+ all_hidden_states = () if output_hidden_states else None
552
+
553
+ for block in self.blocks:
554
+ if output_hidden_states:
555
+ all_hidden_states += (hidden_states,)
556
+ layer_outputs = block(
557
+ hidden_states,
558
+ attention_mask=attention_mask,
559
+ position_ids=position_ids,
560
+ deterministic=deterministic,
561
+ init_cache=init_cache,
562
+ output_attentions=output_attentions,
563
+ )
564
+ hidden_states = layer_outputs[0]
565
+
566
+ if output_attentions:
567
+ all_attentions += (layer_outputs[1],)
568
+
569
+ # this contains possible `None` values - `FlaxMistralModule` will filter them out
570
+ outputs = (hidden_states, all_hidden_states, all_attentions)
571
+
572
+ return outputs
573
+
574
+
575
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaModule with Llama->Mistral
576
+ class FlaxMistralModule(nn.Module):
577
+ config: MistralConfig
578
+ dtype: jnp.dtype = jnp.float32
579
+
580
+ def setup(self):
581
+ self.hidden_size = self.config.hidden_size
582
+ embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
583
+ self.embed_tokens = nn.Embed(
584
+ self.config.vocab_size,
585
+ self.hidden_size,
586
+ embedding_init=embedding_init,
587
+ dtype=self.dtype,
588
+ )
589
+ self.layers = FlaxMistralLayerCollection(self.config, dtype=self.dtype)
590
+ self.norm = FlaxMistralRMSNorm(self.config, dtype=self.dtype)
591
+
592
+ def __call__(
593
+ self,
594
+ input_ids,
595
+ attention_mask=None,
596
+ position_ids=None,
597
+ deterministic=True,
598
+ init_cache: bool = False,
599
+ output_attentions: bool = False,
600
+ output_hidden_states: bool = False,
601
+ return_dict: bool = True,
602
+ ):
603
+ input_embeds = self.embed_tokens(input_ids.astype("i4"))
604
+
605
+ outputs = self.layers(
606
+ input_embeds,
607
+ position_ids=position_ids,
608
+ attention_mask=attention_mask,
609
+ deterministic=deterministic,
610
+ init_cache=init_cache,
611
+ output_attentions=output_attentions,
612
+ output_hidden_states=output_hidden_states,
613
+ return_dict=return_dict,
614
+ )
615
+
616
+ hidden_states = outputs[0]
617
+ hidden_states = self.norm(hidden_states)
618
+
619
+ if output_hidden_states:
620
+ all_hidden_states = outputs[1] + (hidden_states,)
621
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
622
+ else:
623
+ outputs = (hidden_states,) + outputs[1:]
624
+
625
+ if not return_dict:
626
+ return tuple(v for v in outputs if v is not None)
627
+
628
+ return FlaxBaseModelOutput(
629
+ last_hidden_state=hidden_states,
630
+ hidden_states=outputs[1],
631
+ attentions=outputs[-1],
632
+ )
633
+
634
+
635
+ @add_start_docstrings(
636
+ "The bare Mistral Model transformer outputting raw hidden-states without any specific head on top.",
637
+ MISTRAL_START_DOCSTRING,
638
+ )
639
+ class FlaxMistralModel(FlaxMistralPreTrainedModel):
640
+ module_class = FlaxMistralModule
641
+
642
+
643
+ append_call_sample_docstring(
644
+ FlaxMistralModel,
645
+ _CHECKPOINT_FOR_DOC,
646
+ FlaxBaseModelOutputWithPast,
647
+ _CONFIG_FOR_DOC,
648
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
649
+ )
650
+
651
+
652
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaForCausalLMModule with Llama->Mistral
653
+ class FlaxMistralForCausalLMModule(nn.Module):
654
+ config: MistralConfig
655
+ dtype: jnp.dtype = jnp.float32
656
+
657
+ def setup(self):
658
+ self.model = FlaxMistralModule(self.config, dtype=self.dtype)
659
+ self.lm_head = nn.Dense(
660
+ self.config.vocab_size,
661
+ use_bias=False,
662
+ dtype=self.dtype,
663
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
664
+ )
665
+
666
+ def __call__(
667
+ self,
668
+ input_ids,
669
+ attention_mask=None,
670
+ position_ids=None,
671
+ deterministic: bool = True,
672
+ init_cache: bool = False,
673
+ output_attentions: bool = False,
674
+ output_hidden_states: bool = False,
675
+ return_dict: bool = True,
676
+ ):
677
+ outputs = self.model(
678
+ input_ids,
679
+ position_ids=position_ids,
680
+ attention_mask=attention_mask,
681
+ deterministic=deterministic,
682
+ init_cache=init_cache,
683
+ output_attentions=output_attentions,
684
+ output_hidden_states=output_hidden_states,
685
+ return_dict=return_dict,
686
+ )
687
+
688
+ hidden_states = outputs[0]
689
+ lm_logits = self.lm_head(hidden_states)
690
+
691
+ if not return_dict:
692
+ return (lm_logits,) + outputs[1:]
693
+
694
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
695
+
696
+
697
+ @add_start_docstrings(
698
+ """
699
+ The Mistral Model transformer with a language modeling head (linear layer) on top.
700
+ """,
701
+ MISTRAL_START_DOCSTRING,
702
+ )
703
+
704
+ # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Mistral
705
+ class FlaxMistralForCausalLM(FlaxMistralPreTrainedModel):
706
+ module_class = FlaxMistralForCausalLMModule
707
+
708
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
709
+ # initializing the cache
710
+ batch_size, seq_length = input_ids.shape
711
+
712
+ past_key_values = self.init_cache(batch_size, max_length)
713
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
714
+ # But since Mistral uses a causal mask, those positions are masked anyway.
715
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
716
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
717
+ if attention_mask is not None:
718
+ position_ids = attention_mask.cumsum(axis=-1) - 1
719
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
720
+ else:
721
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
722
+
723
+ return {
724
+ "past_key_values": past_key_values,
725
+ "attention_mask": extended_attention_mask,
726
+ "position_ids": position_ids,
727
+ }
728
+
729
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
730
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
731
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
732
+ return model_kwargs
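# --- editor's sketch (illustrative, not part of the committed file) ---
# During decoding, the attention mask built in prepare_inputs_for_generation stays a
# static (batch, max_length) block, while update_inputs_for_generation only advances
# `position_ids` one step per generated token. Hypothetical sizes:
import numpy as np

batch_size, max_length, prompt_len = 1, 8, 3
extended_attention_mask = np.ones((batch_size, max_length), dtype=np.int32)
position_ids = np.arange(prompt_len)[None, :]    # first forward pass covers the prompt
for _ in range(2):                               # two decoding steps
    position_ids = position_ids[:, -1:] + 1      # same rule as update_inputs_for_generation
# position_ids is now [[4]]
# -----------------------------------------------------------------------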
733
+
734
+
735
+ append_call_sample_docstring(
736
+ FlaxMistralForCausalLM,
737
+ _CHECKPOINT_FOR_DOC,
738
+ FlaxCausalLMOutputWithCrossAttentions,
739
+ _CONFIG_FOR_DOC,
740
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
741
+ )
venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_mistral.py ADDED
@@ -0,0 +1,1387 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Mistral model."""
21
+ import inspect
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+
32
+ from ...activations import ACT2FN
33
+ from ...cache_utils import Cache, DynamicCache
34
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
35
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ is_flash_attn_2_available,
41
+ is_flash_attn_greater_or_equal_2_10,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_mistral import MistralConfig
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CONFIG_FOR_DOC = "MistralConfig"
58
+
59
+
60
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
61
+ def _get_unpad_data(attention_mask):
62
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
63
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
64
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
65
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
66
+ return (
67
+ indices,
68
+ cu_seqlens,
69
+ max_seqlen_in_batch,
70
+ )
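# --- editor's sketch (illustrative, not part of the committed file) ---
# For a (batch, seq) padding mask, the helper above returns the flattened indices of
# the real tokens, the cumulative sequence lengths, and the longest sequence. A tiny
# standalone example of the same arithmetic:
import torch
import torch.nn.functional as F

mask = torch.tensor([[1, 1, 0], [1, 1, 1]], dtype=torch.int32)
seqlens = mask.sum(dim=-1, dtype=torch.int32)                                 # tensor([2, 3])
indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()            # tensor([0, 1, 3, 4, 5])
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))  # tensor([0, 2, 5])
# -----------------------------------------------------------------------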
71
+
72
+
73
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mistral
74
+ class MistralRMSNorm(nn.Module):
75
+ def __init__(self, hidden_size, eps=1e-6):
76
+ """
77
+ MistralRMSNorm is equivalent to T5LayerNorm
78
+ """
79
+ super().__init__()
80
+ self.weight = nn.Parameter(torch.ones(hidden_size))
81
+ self.variance_epsilon = eps
82
+
83
+ def forward(self, hidden_states):
84
+ input_dtype = hidden_states.dtype
85
+ hidden_states = hidden_states.to(torch.float32)
86
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
87
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
88
+ return self.weight * hidden_states.to(input_dtype)
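# --- editor's sketch (illustrative, not part of the committed file) ---
# RMSNorm rescales by the root mean square of the features (no mean subtraction,
# unlike LayerNorm). A quick functional check of the same formula in plain torch:
import torch

x = torch.randn(2, 5, 16)
weight, eps = torch.ones(16), 1e-6
ref = weight * (x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + eps))
assert torch.allclose(ref.pow(2).mean(-1), torch.ones(2, 5), atol=1e-3)  # unit RMS per position
# -----------------------------------------------------------------------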
89
+
90
+
91
+ # copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mistral
92
+ # TODO @Arthur no longer copied from LLama after static cache
93
+ class MistralRotaryEmbedding(nn.Module):
94
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
95
+ super().__init__()
96
+
97
+ self.dim = dim
98
+ self.max_position_embeddings = max_position_embeddings
99
+ self.base = base
100
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
101
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
102
+
103
+ # Build here to make `torch.jit.trace` work.
104
+ self._set_cos_sin_cache(
105
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
106
+ )
107
+
108
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
109
+ self.max_seq_len_cached = seq_len
110
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
111
+
112
+ freqs = torch.outer(t, self.inv_freq)
113
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
114
+ emb = torch.cat((freqs, freqs), dim=-1)
115
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
116
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
117
+
118
+ def forward(self, x, seq_len=None):
119
+ # x: [bs, num_attention_heads, seq_len, head_size]
120
+ if seq_len > self.max_seq_len_cached:
121
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
122
+
123
+ return (
124
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
125
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
126
+ )
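# --- editor's sketch (illustrative, not part of the committed file) ---
# The module above caches cos/sin tables of shape (seq_len, dim) and only regrows
# them when a longer sequence is seen. A standalone shape check of the same recipe
# with hypothetical sizes:
import torch

dim, max_pos, base = 8, 16, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
freqs = torch.outer(torch.arange(max_pos).float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
assert emb.cos().shape == (max_pos, dim) and emb.sin().shape == (max_pos, dim)
# -----------------------------------------------------------------------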
127
+
128
+
129
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
130
+ def rotate_half(x):
131
+ """Rotates half the hidden dims of the input."""
132
+ x1 = x[..., : x.shape[-1] // 2]
133
+ x2 = x[..., x.shape[-1] // 2 :]
134
+ return torch.cat((-x2, x1), dim=-1)
135
+
136
+
137
+ # copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
138
+ # TODO @Arthur no longer copied from LLama after static cache
139
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
140
+ """Applies Rotary Position Embedding to the query and key tensors.
141
+
142
+ Args:
143
+ q (`torch.Tensor`): The query tensor.
144
+ k (`torch.Tensor`): The key tensor.
145
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
146
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
147
+ position_ids (`torch.Tensor`):
148
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
149
+ used to pass offsetted position ids when working with a KV-cache.
150
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
151
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
152
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
153
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
154
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
155
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
156
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
157
+ Returns:
158
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
159
+ """
160
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
161
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
162
+ q_embed = (q * cos) + (rotate_half(q) * sin)
163
+ k_embed = (k * cos) + (rotate_half(k) * sin)
164
+ return q_embed, k_embed
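# --- editor's sketch (illustrative, not part of the committed file) ---
# With q, k of shape (batch, heads, seq, head_dim) and cos/sin caches of shape
# (seq, head_dim), indexing by position_ids followed by unsqueeze(1) makes the
# tables broadcast over the heads axis. Hypothetical sizes:
import torch

bsz, heads, seq, hd = 2, 4, 5, 8
cos_cached = torch.randn(seq, hd)                  # stands in for the cached table
position_ids = torch.arange(seq).expand(bsz, seq)
cos = cos_cached[position_ids].unsqueeze(1)        # (bsz, 1, seq, hd)
q = torch.randn(bsz, heads, seq, hd)
assert (q * cos).shape == (bsz, heads, seq, hd)
# -----------------------------------------------------------------------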
165
+
166
+
167
+ class MistralMLP(nn.Module):
168
+ def __init__(self, config):
169
+ super().__init__()
170
+ self.config = config
171
+ self.hidden_size = config.hidden_size
172
+ self.intermediate_size = config.intermediate_size
173
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
174
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
175
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
176
+ self.act_fn = ACT2FN[config.hidden_act]
177
+
178
+ def forward(self, x):
179
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
180
+
181
+
182
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
183
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
184
+ """
185
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
186
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
187
+ """
188
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
189
+ if n_rep == 1:
190
+ return hidden_states
191
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
192
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
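# --- editor's sketch (illustrative, not part of the committed file) ---
# Quick check that the expand/reshape above matches torch.repeat_interleave along
# the key/value-head axis, as the docstring claims (hypothetical sizes):
import torch

x = torch.randn(2, 3, 5, 4)        # (batch, num_key_value_heads, seq_len, head_dim)
n_rep = 2
a = x[:, :, None, :, :].expand(2, 3, n_rep, 5, 4).reshape(2, 3 * n_rep, 5, 4)
b = torch.repeat_interleave(x, repeats=n_rep, dim=1)
assert torch.equal(a, b)
# -----------------------------------------------------------------------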
193
+
194
+
195
+ class MistralAttention(nn.Module):
196
+ """
197
+ Multi-headed attention from the 'Attention Is All You Need' paper, modified to use sliding window attention as in Longformer
198
+ and "Generating Long Sequences with Sparse Transformers".
199
+ """
200
+
201
+ def __init__(self, config: MistralConfig, layer_idx: Optional[int] = None):
202
+ super().__init__()
203
+ self.config = config
204
+ self.layer_idx = layer_idx
205
+ if layer_idx is None:
206
+ logger.warning_once(
207
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
208
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
209
+ "when creating this class."
210
+ )
211
+
212
+ self.hidden_size = config.hidden_size
213
+ self.num_heads = config.num_attention_heads
214
+ self.head_dim = self.hidden_size // self.num_heads
215
+ self.num_key_value_heads = config.num_key_value_heads
216
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
217
+ self.max_position_embeddings = config.max_position_embeddings
218
+ self.rope_theta = config.rope_theta
219
+ self.is_causal = True
220
+ self.attention_dropout = config.attention_dropout
221
+
222
+ if (self.head_dim * self.num_heads) != self.hidden_size:
223
+ raise ValueError(
224
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
225
+ f" and `num_heads`: {self.num_heads})."
226
+ )
227
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
228
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
229
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
230
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
231
+
232
+ self.rotary_emb = MistralRotaryEmbedding(
233
+ self.head_dim,
234
+ max_position_embeddings=self.max_position_embeddings,
235
+ base=self.rope_theta,
236
+ )
237
+
238
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
239
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
240
+
241
+ def forward(
242
+ self,
243
+ hidden_states: torch.Tensor,
244
+ attention_mask: Optional[torch.Tensor] = None,
245
+ position_ids: Optional[torch.LongTensor] = None,
246
+ past_key_value: Optional[Cache] = None,
247
+ output_attentions: bool = False,
248
+ use_cache: bool = False,
249
+ **kwargs,
250
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
251
+ if "padding_mask" in kwargs:
252
+ warnings.warn(
253
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
254
+ )
255
+ bsz, q_len, _ = hidden_states.size()
256
+
257
+ query_states = self.q_proj(hidden_states)
258
+ key_states = self.k_proj(hidden_states)
259
+ value_states = self.v_proj(hidden_states)
260
+
261
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
262
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
263
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
264
+
265
+ kv_seq_len = key_states.shape[-2]
266
+ if past_key_value is not None:
267
+ if self.layer_idx is None:
268
+ raise ValueError(
269
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
270
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
271
+ "with a layer index."
272
+ )
273
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
274
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
275
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
276
+
277
+ if past_key_value is not None:
278
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
279
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
280
+
281
+ # repeat k/v heads if n_kv_heads < n_heads
282
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
283
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
284
+
285
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
286
+
287
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
288
+ raise ValueError(
289
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
290
+ f" {attn_weights.size()}"
291
+ )
292
+
293
+ if attention_mask is not None:
294
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
295
+ raise ValueError(
296
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
297
+ )
298
+
299
+ attn_weights = attn_weights + attention_mask
300
+
301
+ # upcast attention to fp32
302
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
303
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
304
+ attn_output = torch.matmul(attn_weights, value_states)
305
+
306
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
307
+ raise ValueError(
308
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
309
+ f" {attn_output.size()}"
310
+ )
311
+
312
+ attn_output = attn_output.transpose(1, 2).contiguous()
313
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
314
+
315
+ attn_output = self.o_proj(attn_output)
316
+
317
+ if not output_attentions:
318
+ attn_weights = None
319
+
320
+ return attn_output, attn_weights, past_key_value
321
+
322
+
323
+ class MistralFlashAttention2(MistralAttention):
324
+ """
325
+ Mistral flash attention module. This module inherits from `MistralAttention`, as the weights of the module stay
326
+ untouched. The only required change is in the forward pass, which needs to correctly call the public API of
327
+ flash attention and deal with padding tokens in case the input contains any of them.
328
+ """
329
+
330
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
331
+ def __init__(self, *args, **kwargs):
332
+ super().__init__(*args, **kwargs)
333
+
334
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
335
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
336
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
337
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
338
+
339
+ def forward(
340
+ self,
341
+ hidden_states: torch.Tensor,
342
+ attention_mask: Optional[torch.Tensor] = None,
343
+ position_ids: Optional[torch.LongTensor] = None,
344
+ past_key_value: Optional[Cache] = None,
345
+ output_attentions: bool = False,
346
+ use_cache: bool = False,
347
+ **kwargs,
348
+ ):
349
+ if "padding_mask" in kwargs:
350
+ warnings.warn(
351
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
352
+ )
353
+
354
+ # overwrite attention_mask with padding_mask
355
+ attention_mask = kwargs.pop("padding_mask")
356
+ bsz, q_len, _ = hidden_states.size()
357
+
358
+ query_states = self.q_proj(hidden_states)
359
+ key_states = self.k_proj(hidden_states)
360
+ value_states = self.v_proj(hidden_states)
361
+
362
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
363
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
364
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
365
+
366
+ kv_seq_len = key_states.shape[-2]
367
+ if past_key_value is not None:
368
+ if self.layer_idx is None:
369
+ raise ValueError(
370
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
371
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
372
+ "with a layer index."
373
+ )
374
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
375
+
376
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
377
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
378
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
379
+
380
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
381
+
382
+ use_sliding_windows = (
383
+ _flash_supports_window_size
384
+ and getattr(self.config, "sliding_window", None) is not None
385
+ and kv_seq_len > self.config.sliding_window
386
+ )
387
+
388
+ if not _flash_supports_window_size:
389
+ logger.warning_once(
390
+ "The current flash attention version does not support sliding window attention. For a more memory-efficient implementation,"
391
+ " make sure to upgrade the flash-attn library."
392
+ )
393
+
394
+ if past_key_value is not None:
395
+ # Activate cache slicing only if the config has a `sliding_window` attribute set
396
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
397
+ if (
398
+ getattr(self.config, "sliding_window", None) is not None
399
+ and kv_seq_len > self.config.sliding_window
400
+ and cache_has_contents
401
+ ):
402
+ slicing_tokens = 1 - self.config.sliding_window
403
+
404
+ past_key = past_key_value[self.layer_idx][0]
405
+ past_value = past_key_value[self.layer_idx][1]
406
+
407
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
408
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
409
+
410
+ if past_key.shape[-2] != self.config.sliding_window - 1:
411
+ raise ValueError(
412
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
413
+ f" {past_key.shape}"
414
+ )
415
+
416
+ if attention_mask is not None:
417
+ attention_mask = attention_mask[:, slicing_tokens:]
418
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
419
+
420
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
421
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
422
+
423
+ # repeat k/v heads if n_kv_heads < n_heads
424
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
425
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
426
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
427
+
428
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
429
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
430
+ # cast them back to float16 just to be sure everything works as expected.
431
+ input_dtype = query_states.dtype
432
+ if input_dtype == torch.float32:
433
+ if torch.is_autocast_enabled():
434
+ target_dtype = torch.get_autocast_gpu_dtype()
435
+ # Handle the case where the model is quantized
436
+ elif hasattr(self.config, "_pre_quantization_dtype"):
437
+ target_dtype = self.config._pre_quantization_dtype
438
+ else:
439
+ target_dtype = self.q_proj.weight.dtype
440
+
441
+ logger.warning_once(
442
+ f"The input hidden states seem to be silently cast to float32; this might be related to"
443
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back to"
444
+ f" {target_dtype}."
445
+ )
446
+
447
+ query_states = query_states.to(target_dtype)
448
+ key_states = key_states.to(target_dtype)
449
+ value_states = value_states.to(target_dtype)
450
+
451
+ # Reshape to the expected shape for Flash Attention
452
+ query_states = query_states.transpose(1, 2)
453
+ key_states = key_states.transpose(1, 2)
454
+ value_states = value_states.transpose(1, 2)
455
+
456
+ attn_output = self._flash_attention_forward(
457
+ query_states,
458
+ key_states,
459
+ value_states,
460
+ attention_mask,
461
+ q_len,
462
+ dropout=dropout_rate,
463
+ use_sliding_windows=use_sliding_windows,
464
+ )
465
+
466
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
467
+ attn_output = self.o_proj(attn_output)
468
+
469
+ if not output_attentions:
470
+ attn_weights = None
471
+
472
+ return attn_output, attn_weights, past_key_value
473
+
474
+ def _flash_attention_forward(
475
+ self,
476
+ query_states,
477
+ key_states,
478
+ value_states,
479
+ attention_mask,
480
+ query_length,
481
+ dropout=0.0,
482
+ softmax_scale=None,
483
+ use_sliding_windows=False,
484
+ ):
485
+ """
486
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
487
+ first unpad the input, then compute the attention scores, and finally pad the attention scores back.
488
+
489
+ Args:
490
+ query_states (`torch.Tensor`):
491
+ Input query states to be passed to Flash Attention API
492
+ key_states (`torch.Tensor`):
493
+ Input key states to be passed to Flash Attention API
494
+ value_states (`torch.Tensor`):
495
+ Input value states to be passed to Flash Attention API
496
+ attention_mask (`torch.Tensor`):
497
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
498
+ position of padding tokens and 1 for the position of non-padding tokens.
499
+ dropout (`float`):
500
+ Attention dropout
501
+ softmax_scale (`float`, *optional*):
502
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
503
+ use_sliding_windows (`bool`, *optional*):
504
+ Whether to activate sliding window attention.
505
+ """
506
+ if not self._flash_attn_uses_top_left_mask:
507
+ causal = self.is_causal
508
+ else:
509
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
510
+ causal = self.is_causal and query_length != 1
511
+
512
+ # Contains at least one padding token in the sequence
513
+ if attention_mask is not None:
514
+ batch_size = query_states.shape[0]
515
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
516
+ query_states, key_states, value_states, attention_mask, query_length
517
+ )
518
+
519
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
520
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
521
+
522
+ if not use_sliding_windows:
523
+ attn_output_unpad = flash_attn_varlen_func(
524
+ query_states,
525
+ key_states,
526
+ value_states,
527
+ cu_seqlens_q=cu_seqlens_q,
528
+ cu_seqlens_k=cu_seqlens_k,
529
+ max_seqlen_q=max_seqlen_in_batch_q,
530
+ max_seqlen_k=max_seqlen_in_batch_k,
531
+ dropout_p=dropout,
532
+ softmax_scale=softmax_scale,
533
+ causal=causal,
534
+ )
535
+ else:
536
+ attn_output_unpad = flash_attn_varlen_func(
537
+ query_states,
538
+ key_states,
539
+ value_states,
540
+ cu_seqlens_q=cu_seqlens_q,
541
+ cu_seqlens_k=cu_seqlens_k,
542
+ max_seqlen_q=max_seqlen_in_batch_q,
543
+ max_seqlen_k=max_seqlen_in_batch_k,
544
+ dropout_p=dropout,
545
+ softmax_scale=softmax_scale,
546
+ causal=causal,
547
+ window_size=(self.config.sliding_window, self.config.sliding_window),
548
+ )
549
+
550
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
551
+ else:
552
+ if not use_sliding_windows:
553
+ attn_output = flash_attn_func(
554
+ query_states,
555
+ key_states,
556
+ value_states,
557
+ dropout,
558
+ softmax_scale=softmax_scale,
559
+ causal=causal,
560
+ )
561
+ else:
562
+ attn_output = flash_attn_func(
563
+ query_states,
564
+ key_states,
565
+ value_states,
566
+ dropout,
567
+ softmax_scale=softmax_scale,
568
+ causal=causal,
569
+ window_size=(self.config.sliding_window, self.config.sliding_window),
570
+ )
571
+
572
+ return attn_output
573
+
574
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
575
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
576
+
577
+ # On the first iteration we need to properly re-create the padding mask
578
+ # by slicing it at the proper place
579
+ if kv_seq_len != attention_mask.shape[-1]:
580
+ attention_mask_num_tokens = attention_mask.shape[-1]
581
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
582
+
583
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
584
+
585
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
586
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
587
+
588
+ if query_length == kv_seq_len:
589
+ query_layer = index_first_axis(
590
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
591
+ )
592
+ cu_seqlens_q = cu_seqlens_k
593
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
594
+ indices_q = indices_k
595
+ elif query_length == 1:
596
+ max_seqlen_in_batch_q = 1
597
+ cu_seqlens_q = torch.arange(
598
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
599
+ ) # There is a memcpy here, that is very bad.
600
+ indices_q = cu_seqlens_q[:-1]
601
+ query_layer = query_layer.squeeze(1)
602
+ else:
603
+ # The -q_len: slice assumes left padding.
604
+ attention_mask = attention_mask[:, -query_length:]
605
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
606
+
607
+ return (
608
+ query_layer,
609
+ key_layer,
610
+ value_layer,
611
+ indices_q,
612
+ (cu_seqlens_q, cu_seqlens_k),
613
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
614
+ )
615
+
616
+
617
+ # copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Mistral
618
+ # TODO @Arthur no longer copied from LLama after static cache
619
+ class MistralSdpaAttention(MistralAttention):
620
+ """
621
+ Mistral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
622
+ `MistralAttention`, as the weights of the module stay untouched. The only changes are in the forward pass, to adapt to the
623
+ SDPA API.
624
+ """
625
+
626
+ # Adapted from MistralAttention.forward
627
+ def forward(
628
+ self,
629
+ hidden_states: torch.Tensor,
630
+ attention_mask: Optional[torch.Tensor] = None,
631
+ position_ids: Optional[torch.LongTensor] = None,
632
+ past_key_value: Optional[Cache] = None,
633
+ output_attentions: bool = False,
634
+ use_cache: bool = False,
635
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
636
+ if output_attentions:
637
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
638
+ logger.warning_once(
639
+ "MistralModel is using MistralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
640
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
641
+ )
642
+ return super().forward(
643
+ hidden_states=hidden_states,
644
+ attention_mask=attention_mask,
645
+ position_ids=position_ids,
646
+ past_key_value=past_key_value,
647
+ output_attentions=output_attentions,
648
+ use_cache=use_cache,
649
+ )
650
+
651
+ bsz, q_len, _ = hidden_states.size()
652
+
653
+ query_states = self.q_proj(hidden_states)
654
+ key_states = self.k_proj(hidden_states)
655
+ value_states = self.v_proj(hidden_states)
656
+
657
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
658
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
659
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
660
+
661
+ kv_seq_len = key_states.shape[-2]
662
+ if past_key_value is not None:
663
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
664
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
665
+
666
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
667
+
668
+ if past_key_value is not None:
669
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
670
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
671
+
672
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
673
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
674
+
675
+ if attention_mask is not None:
676
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
677
+ raise ValueError(
678
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
679
+ )
680
+
681
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
682
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
683
+ if query_states.device.type == "cuda" and attention_mask is not None:
684
+ query_states = query_states.contiguous()
685
+ key_states = key_states.contiguous()
686
+ value_states = value_states.contiguous()
687
+
688
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
689
+ query_states,
690
+ key_states,
691
+ value_states,
692
+ attn_mask=attention_mask,
693
+ dropout_p=self.attention_dropout if self.training else 0.0,
694
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
695
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
696
+ )
697
+
698
+ attn_output = attn_output.transpose(1, 2).contiguous()
699
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
700
+
701
+ attn_output = self.o_proj(attn_output)
702
+
703
+ return attn_output, None, past_key_value
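# --- editor's sketch (illustrative, not part of the committed file) ---
# Minimal standalone call of the SDPA kernel used above, relying on is_causal=True
# instead of an explicit mask (hypothetical sizes):
import torch

q = torch.randn(1, 4, 5, 8)        # (batch, heads, seq, head_dim)
k = torch.randn(1, 4, 5, 8)
v = torch.randn(1, 4, 5, 8)
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
assert out.shape == (1, 4, 5, 8)
# -----------------------------------------------------------------------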
704
+
705
+
706
+ MISTRAL_ATTENTION_CLASSES = {
707
+ "eager": MistralAttention,
708
+ "flash_attention_2": MistralFlashAttention2,
709
+ "sdpa": MistralSdpaAttention,
710
+ }
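# --- editor's sketch (illustrative, not part of the committed file) ---
# The mapping above is indexed with config._attn_implementation; callers usually pick
# an implementation through the `attn_implementation` argument of from_pretrained.
# Left commented out because it downloads weights, and the checkpoint id is only an example:
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained(
#       "mistralai/Mistral-7B-v0.1", attn_implementation="sdpa"
#   )
# -----------------------------------------------------------------------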
711
+
712
+
713
+ class MistralDecoderLayer(nn.Module):
714
+ def __init__(self, config: MistralConfig, layer_idx: int):
715
+ super().__init__()
716
+ self.hidden_size = config.hidden_size
717
+
718
+ self.self_attn = MISTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
719
+
720
+ self.mlp = MistralMLP(config)
721
+ self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
722
+ self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
723
+
724
+ def forward(
725
+ self,
726
+ hidden_states: torch.Tensor,
727
+ attention_mask: Optional[torch.Tensor] = None,
728
+ position_ids: Optional[torch.LongTensor] = None,
729
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
730
+ output_attentions: Optional[bool] = False,
731
+ use_cache: Optional[bool] = False,
732
+ **kwargs,
733
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
734
+ if "padding_mask" in kwargs:
735
+ warnings.warn(
736
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
737
+ )
738
+ """
739
+ Args:
740
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
741
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
742
+ `(batch, sequence_length)` where padding elements are indicated by 0.
743
+ output_attentions (`bool`, *optional*):
744
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
745
+ returned tensors for more detail.
746
+ use_cache (`bool`, *optional*):
747
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
748
+ (see `past_key_values`).
749
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
750
+ """
751
+
752
+ residual = hidden_states
753
+
754
+ hidden_states = self.input_layernorm(hidden_states)
755
+
756
+ # Self Attention
757
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
758
+ hidden_states=hidden_states,
759
+ attention_mask=attention_mask,
760
+ position_ids=position_ids,
761
+ past_key_value=past_key_value,
762
+ output_attentions=output_attentions,
763
+ use_cache=use_cache,
764
+ )
765
+ hidden_states = residual + hidden_states
766
+
767
+ # Fully Connected
768
+ residual = hidden_states
769
+ hidden_states = self.post_attention_layernorm(hidden_states)
770
+ hidden_states = self.mlp(hidden_states)
771
+ hidden_states = residual + hidden_states
772
+
773
+ outputs = (hidden_states,)
774
+
775
+ if output_attentions:
776
+ outputs += (self_attn_weights,)
777
+
778
+ if use_cache:
779
+ outputs += (present_key_value,)
780
+
781
+ return outputs
782
+
783
+
784
+ MISTRAL_START_DOCSTRING = r"""
785
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
786
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
787
+ etc.)
788
+
789
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
790
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
791
+ and behavior.
792
+
793
+ Parameters:
794
+ config ([`MistralConfig`]):
795
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
796
+ load the weights associated with the model, only the configuration. Check out the
797
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
798
+ """
799
+
800
+
801
+ @add_start_docstrings(
802
+ "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
803
+ MISTRAL_START_DOCSTRING,
804
+ )
805
+ class MistralPreTrainedModel(PreTrainedModel):
806
+ config_class = MistralConfig
807
+ base_model_prefix = "model"
808
+ supports_gradient_checkpointing = True
809
+ _no_split_modules = ["MistralDecoderLayer"]
810
+ _skip_keys_device_placement = "past_key_values"
811
+ _supports_flash_attn_2 = True
812
+ _supports_sdpa = True
813
+ _supports_cache_class = True
814
+
815
+ def _init_weights(self, module):
816
+ std = self.config.initializer_range
817
+ if isinstance(module, nn.Linear):
818
+ module.weight.data.normal_(mean=0.0, std=std)
819
+ if module.bias is not None:
820
+ module.bias.data.zero_()
821
+ elif isinstance(module, nn.Embedding):
822
+ module.weight.data.normal_(mean=0.0, std=std)
823
+ if module.padding_idx is not None:
824
+ module.weight.data[module.padding_idx].zero_()
825
+
826
+
827
+ MISTRAL_INPUTS_DOCSTRING = r"""
828
+ Args:
829
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
830
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
831
+ it.
832
+
833
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
834
+ [`PreTrainedTokenizer.__call__`] for details.
835
+
836
+ [What are input IDs?](../glossary#input-ids)
837
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
838
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
839
+
840
+ - 1 for tokens that are **not masked**,
841
+ - 0 for tokens that are **masked**.
842
+
843
+ [What are attention masks?](../glossary#attention-mask)
844
+
845
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
846
+ [`PreTrainedTokenizer.__call__`] for details.
847
+
848
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
849
+ `past_key_values`).
850
+
851
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
852
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
853
+ information on the default strategy.
854
+
855
+ - 1 indicates the head is **not masked**,
856
+ - 0 indicates the head is **masked**.
857
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
858
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
859
+ config.n_positions - 1]`.
860
+
861
+ [What are position IDs?](../glossary#position-ids)
862
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
863
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
864
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
865
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
866
+
867
+ Two formats are allowed:
868
+ - a [`~cache_utils.Cache`] instance;
869
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
870
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
871
+ cache format.
872
+
873
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
874
+ legacy cache format will be returned.
875
+
876
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
877
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
878
+ of shape `(batch_size, sequence_length)`.
879
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
880
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
881
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
882
+ model's internal embedding lookup matrix.
883
+ use_cache (`bool`, *optional*):
884
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
885
+ `past_key_values`).
886
+ output_attentions (`bool`, *optional*):
887
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
888
+ tensors for more detail.
889
+ output_hidden_states (`bool`, *optional*):
890
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
891
+ more detail.
892
+ return_dict (`bool`, *optional*):
893
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
894
+ """
895
+
896
+
897
+ @add_start_docstrings(
898
+ "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
899
+ MISTRAL_START_DOCSTRING,
900
+ )
901
+ class MistralModel(MistralPreTrainedModel):
902
+ """
903
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]
904
+
905
+ Args:
906
+ config: MistralConfig
907
+ """
908
+
909
+ def __init__(self, config: MistralConfig):
910
+ super().__init__(config)
911
+ self.padding_idx = config.pad_token_id
912
+ self.vocab_size = config.vocab_size
913
+
914
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
915
+ self.layers = nn.ModuleList(
916
+ [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
917
+ )
918
+ self._attn_implementation = config._attn_implementation
919
+ self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
920
+
921
+ self.gradient_checkpointing = False
922
+ # Initialize weights and apply final processing
923
+ self.post_init()
924
+
925
+ def get_input_embeddings(self):
926
+ return self.embed_tokens
927
+
928
+ def set_input_embeddings(self, value):
929
+ self.embed_tokens = value
930
+
931
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
932
+ def forward(
933
+ self,
934
+ input_ids: torch.LongTensor = None,
935
+ attention_mask: Optional[torch.Tensor] = None,
936
+ position_ids: Optional[torch.LongTensor] = None,
937
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
938
+ inputs_embeds: Optional[torch.FloatTensor] = None,
939
+ use_cache: Optional[bool] = None,
940
+ output_attentions: Optional[bool] = None,
941
+ output_hidden_states: Optional[bool] = None,
942
+ return_dict: Optional[bool] = None,
943
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
944
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
945
+ output_hidden_states = (
946
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
947
+ )
948
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
949
+
950
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
951
+
952
+ # retrieve input_ids and inputs_embeds
953
+ if input_ids is not None and inputs_embeds is not None:
954
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
955
+ elif input_ids is not None:
956
+ batch_size, seq_length = input_ids.shape
957
+ elif inputs_embeds is not None:
958
+ batch_size, seq_length, _ = inputs_embeds.shape
959
+ else:
960
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
961
+
962
+ if self.gradient_checkpointing and self.training:
963
+ if use_cache:
964
+ logger.warning_once(
965
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
966
+ )
967
+ use_cache = False
968
+
969
+ past_key_values_length = 0
970
+
971
+ if use_cache:
972
+ use_legacy_cache = not isinstance(past_key_values, Cache)
973
+ if use_legacy_cache:
974
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
975
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
976
+
977
+ if position_ids is None:
978
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
979
+ position_ids = torch.arange(
980
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
981
+ )
982
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
983
+ else:
984
+ position_ids = position_ids.view(-1, seq_length).long()
985
+
986
+ if inputs_embeds is None:
987
+ inputs_embeds = self.embed_tokens(input_ids)
988
+
989
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
990
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
991
+ if is_padding_right:
992
+ raise ValueError(
993
+ "You are attempting to perform batched generation with padding_side='right'"
994
+ " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to "
995
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
996
+ )
997
+
998
+ if self._attn_implementation == "flash_attention_2":
999
+ # 2d mask is passed through the layers
1000
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1001
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1002
+ # output_attentions=True cannot be supported when using SDPA, and we fall back on
1003
+ # the manual implementation that requires a 4D causal mask in all cases.
1004
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1005
+ attention_mask,
1006
+ (batch_size, seq_length),
1007
+ inputs_embeds,
1008
+ past_key_values_length,
1009
+ sliding_window=self.config.sliding_window,
1010
+ )
1011
+ else:
1012
+ # 4d mask is passed through the layers
1013
+ attention_mask = _prepare_4d_causal_attention_mask(
1014
+ attention_mask,
1015
+ (batch_size, seq_length),
1016
+ inputs_embeds,
1017
+ past_key_values_length,
1018
+ sliding_window=self.config.sliding_window,
1019
+ )
1020
+
1021
+ hidden_states = inputs_embeds
1022
+
1023
+ # decoder layers
1024
+ all_hidden_states = () if output_hidden_states else None
1025
+ all_self_attns = () if output_attentions else None
1026
+ next_decoder_cache = None
1027
+
1028
+ for decoder_layer in self.layers:
1029
+ if output_hidden_states:
1030
+ all_hidden_states += (hidden_states,)
1031
+
1032
+ if self.gradient_checkpointing and self.training:
1033
+ layer_outputs = self._gradient_checkpointing_func(
1034
+ decoder_layer.__call__,
1035
+ hidden_states,
1036
+ attention_mask,
1037
+ position_ids,
1038
+ past_key_values,
1039
+ output_attentions,
1040
+ use_cache,
1041
+ )
1042
+ else:
1043
+ layer_outputs = decoder_layer(
1044
+ hidden_states,
1045
+ attention_mask=attention_mask,
1046
+ position_ids=position_ids,
1047
+ past_key_value=past_key_values,
1048
+ output_attentions=output_attentions,
1049
+ use_cache=use_cache,
1050
+ )
1051
+
1052
+ hidden_states = layer_outputs[0]
1053
+
1054
+ if use_cache:
1055
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1056
+
1057
+ if output_attentions:
1058
+ all_self_attns += (layer_outputs[1],)
1059
+
1060
+ hidden_states = self.norm(hidden_states)
1061
+
1062
+ # add hidden states from the last decoder layer
1063
+ if output_hidden_states:
1064
+ all_hidden_states += (hidden_states,)
1065
+
1066
+ next_cache = None
1067
+ if use_cache:
1068
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1069
+
1070
+ if not return_dict:
1071
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1072
+ return BaseModelOutputWithPast(
1073
+ last_hidden_state=hidden_states,
1074
+ past_key_values=next_cache,
1075
+ hidden_states=all_hidden_states,
1076
+ attentions=all_self_attns,
1077
+ )
1078
+
1079
+
1080
+ class MistralForCausalLM(MistralPreTrainedModel):
1081
+ _tied_weights_keys = ["lm_head.weight"]
1082
+
1083
+ def __init__(self, config):
1084
+ super().__init__(config)
1085
+ self.model = MistralModel(config)
1086
+ self.vocab_size = config.vocab_size
1087
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1088
+
1089
+ # Initialize weights and apply final processing
1090
+ self.post_init()
1091
+
1092
+ def get_input_embeddings(self):
1093
+ return self.model.embed_tokens
1094
+
1095
+ def set_input_embeddings(self, value):
1096
+ self.model.embed_tokens = value
1097
+
1098
+ def get_output_embeddings(self):
1099
+ return self.lm_head
1100
+
1101
+ def set_output_embeddings(self, new_embeddings):
1102
+ self.lm_head = new_embeddings
1103
+
1104
+ def set_decoder(self, decoder):
1105
+ self.model = decoder
1106
+
1107
+ def get_decoder(self):
1108
+ return self.model
1109
+
1110
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
1111
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1112
+ def forward(
1113
+ self,
1114
+ input_ids: torch.LongTensor = None,
1115
+ attention_mask: Optional[torch.Tensor] = None,
1116
+ position_ids: Optional[torch.LongTensor] = None,
1117
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1118
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1119
+ labels: Optional[torch.LongTensor] = None,
1120
+ use_cache: Optional[bool] = None,
1121
+ output_attentions: Optional[bool] = None,
1122
+ output_hidden_states: Optional[bool] = None,
1123
+ return_dict: Optional[bool] = None,
1124
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1125
+ r"""
1126
+ Args:
1127
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1128
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1129
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1130
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1131
+
1132
+ Returns:
1133
+
1134
+ Example:
1135
+
1136
+ ```python
1137
+ >>> from transformers import AutoTokenizer, MistralForCausalLM
1138
+
1139
+ >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
1140
+ >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
1141
+
1142
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1143
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1144
+
1145
+ >>> # Generate
1146
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1147
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1148
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1149
+ ```"""
1150
+
1151
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1152
+ output_hidden_states = (
1153
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1154
+ )
1155
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1156
+
1157
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1158
+ outputs = self.model(
1159
+ input_ids=input_ids,
1160
+ attention_mask=attention_mask,
1161
+ position_ids=position_ids,
1162
+ past_key_values=past_key_values,
1163
+ inputs_embeds=inputs_embeds,
1164
+ use_cache=use_cache,
1165
+ output_attentions=output_attentions,
1166
+ output_hidden_states=output_hidden_states,
1167
+ return_dict=return_dict,
1168
+ )
1169
+
1170
+ hidden_states = outputs[0]
1171
+ logits = self.lm_head(hidden_states)
1172
+ logits = logits.float()
1173
+
1174
+ loss = None
1175
+ if labels is not None:
1176
+ # Shift so that tokens < n predict n
1177
+ shift_logits = logits[..., :-1, :].contiguous()
1178
+ shift_labels = labels[..., 1:].contiguous()
1179
+ # Flatten the tokens
1180
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1181
+ shift_labels = shift_labels.view(-1)
1182
+ # Ensure tensors are on the same device
1183
+ shift_labels = shift_labels.to(shift_logits.device)
1184
+ loss_fct = CrossEntropyLoss()
1185
+ loss = loss_fct(shift_logits, shift_labels)
1186
+
1187
+ if not return_dict:
1188
+ output = (logits,) + outputs[1:]
1189
+ return (loss,) + output if loss is not None else output
1190
+
1191
+ return CausalLMOutputWithPast(
1192
+ loss=loss,
1193
+ logits=logits,
1194
+ past_key_values=outputs.past_key_values,
1195
+ hidden_states=outputs.hidden_states,
1196
+ attentions=outputs.attentions,
1197
+ )
1198
+
1199
+ def prepare_inputs_for_generation(
1200
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1201
+ ):
1202
+ # Omit tokens covered by past_key_values
1203
+ if past_key_values is not None:
1204
+ if isinstance(past_key_values, Cache):
1205
+ cache_length = past_key_values.get_seq_length()
1206
+ past_length = past_key_values.seen_tokens
1207
+ max_cache_length = past_key_values.get_max_length()
1208
+ else:
1209
+ cache_length = past_length = past_key_values[0][0].shape[2]
1210
+ max_cache_length = None
1211
+
1212
+ # Keep only the unprocessed tokens:
1213
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1214
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
1215
+ # input)
1216
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1217
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1218
+ # 2 - If the past_length is smaller than input_ids' length, then input_ids holds all input tokens. We can discard
1219
+ # input_ids based on the past_length.
1220
+ elif past_length < input_ids.shape[1]:
1221
+ input_ids = input_ids[:, past_length:]
1222
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1223
+
1224
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1225
+ if (
1226
+ max_cache_length is not None
1227
+ and attention_mask is not None
1228
+ and cache_length + input_ids.shape[1] > max_cache_length
1229
+ ):
1230
+ attention_mask = attention_mask[:, -max_cache_length:]
1231
+
1232
+ position_ids = kwargs.get("position_ids", None)
1233
+ if attention_mask is not None and position_ids is None:
1234
+ # create position_ids on the fly for batch generation
1235
+ position_ids = attention_mask.long().cumsum(-1) - 1
1236
+ position_ids.masked_fill_(attention_mask == 0, 1)
1237
+ if past_key_values:
1238
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1239
+
1240
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1241
+ if inputs_embeds is not None and past_key_values is None:
1242
+ model_inputs = {"inputs_embeds": inputs_embeds}
1243
+ else:
1244
+ model_inputs = {"input_ids": input_ids}
1245
+
1246
+ model_inputs.update(
1247
+ {
1248
+ "position_ids": position_ids,
1249
+ "past_key_values": past_key_values,
1250
+ "use_cache": kwargs.get("use_cache"),
1251
+ "attention_mask": attention_mask,
1252
+ }
1253
+ )
1254
+ return model_inputs
1255
+
1256
+ @staticmethod
1257
+ def _reorder_cache(past_key_values, beam_idx):
1258
+ reordered_past = ()
1259
+ for layer_past in past_key_values:
1260
+ reordered_past += (
1261
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1262
+ )
1263
+ return reordered_past
1264
+
1265
+
1266
+ @add_start_docstrings(
1267
+ """
1268
+ The Mistral Model transformer with a sequence classification head on top (linear layer).
1269
+
1270
+ [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1271
+ (e.g. GPT-2) do.
1272
+
1273
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1274
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1275
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1276
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1277
+ each row of the batch).
1278
+ """,
1279
+ MISTRAL_START_DOCSTRING,
1280
+ )
1281
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mistral, LLAMA->MISTRAL
1282
+ class MistralForSequenceClassification(MistralPreTrainedModel):
1283
+ def __init__(self, config):
1284
+ super().__init__(config)
1285
+ self.num_labels = config.num_labels
1286
+ self.model = MistralModel(config)
1287
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1288
+
1289
+ # Initialize weights and apply final processing
1290
+ self.post_init()
1291
+
1292
+ def get_input_embeddings(self):
1293
+ return self.model.embed_tokens
1294
+
1295
+ def set_input_embeddings(self, value):
1296
+ self.model.embed_tokens = value
1297
+
1298
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
1299
+ def forward(
1300
+ self,
1301
+ input_ids: torch.LongTensor = None,
1302
+ attention_mask: Optional[torch.Tensor] = None,
1303
+ position_ids: Optional[torch.LongTensor] = None,
1304
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1305
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1306
+ labels: Optional[torch.LongTensor] = None,
1307
+ use_cache: Optional[bool] = None,
1308
+ output_attentions: Optional[bool] = None,
1309
+ output_hidden_states: Optional[bool] = None,
1310
+ return_dict: Optional[bool] = None,
1311
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1312
+ r"""
1313
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1314
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1315
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1316
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1317
+ """
1318
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1319
+
1320
+ transformer_outputs = self.model(
1321
+ input_ids,
1322
+ attention_mask=attention_mask,
1323
+ position_ids=position_ids,
1324
+ past_key_values=past_key_values,
1325
+ inputs_embeds=inputs_embeds,
1326
+ use_cache=use_cache,
1327
+ output_attentions=output_attentions,
1328
+ output_hidden_states=output_hidden_states,
1329
+ return_dict=return_dict,
1330
+ )
1331
+ hidden_states = transformer_outputs[0]
1332
+ logits = self.score(hidden_states)
1333
+
1334
+ if input_ids is not None:
1335
+ batch_size = input_ids.shape[0]
1336
+ else:
1337
+ batch_size = inputs_embeds.shape[0]
1338
+
1339
+ if self.config.pad_token_id is None and batch_size != 1:
1340
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1341
+ if self.config.pad_token_id is None:
1342
+ sequence_lengths = -1
1343
+ else:
1344
+ if input_ids is not None:
1345
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1346
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1347
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1348
+ sequence_lengths = sequence_lengths.to(logits.device)
1349
+ else:
1350
+ sequence_lengths = -1
1351
+
1352
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1353
+
1354
+ loss = None
1355
+ if labels is not None:
1356
+ labels = labels.to(logits.device)
1357
+ if self.config.problem_type is None:
1358
+ if self.num_labels == 1:
1359
+ self.config.problem_type = "regression"
1360
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1361
+ self.config.problem_type = "single_label_classification"
1362
+ else:
1363
+ self.config.problem_type = "multi_label_classification"
1364
+
1365
+ if self.config.problem_type == "regression":
1366
+ loss_fct = MSELoss()
1367
+ if self.num_labels == 1:
1368
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1369
+ else:
1370
+ loss = loss_fct(pooled_logits, labels)
1371
+ elif self.config.problem_type == "single_label_classification":
1372
+ loss_fct = CrossEntropyLoss()
1373
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1374
+ elif self.config.problem_type == "multi_label_classification":
1375
+ loss_fct = BCEWithLogitsLoss()
1376
+ loss = loss_fct(pooled_logits, labels)
1377
+ if not return_dict:
1378
+ output = (pooled_logits,) + transformer_outputs[1:]
1379
+ return ((loss,) + output) if loss is not None else output
1380
+
1381
+ return SequenceClassifierOutputWithPast(
1382
+ loss=loss,
1383
+ logits=pooled_logits,
1384
+ past_key_values=transformer_outputs.past_key_values,
1385
+ hidden_states=transformer_outputs.hidden_states,
1386
+ attentions=transformer_outputs.attentions,
1387
+ )
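The `MISTRAL_ATTENTION_CLASSES` mapping above is what lets a single config flag choose between the eager, SDPA and FlashAttention-2 attention paths. A minimal usage sketch, assuming a transformers version that accepts the `attn_implementation` argument to `from_pretrained` (the checkpoint name, dtype and generation settings are illustrative only):

```python
# Hedged sketch: selecting the attention backend that MISTRAL_ATTENTION_CLASSES
# dispatches on. "sdpa" assumes a recent torch; "flash_attention_2" would
# additionally require the flash-attn package and a CUDA device.
import torch
from transformers import AutoTokenizer, MistralForCausalLM

model = MistralForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",   # illustrative checkpoint
    torch_dtype=torch.float16,
    attn_implementation="sdpa",    # one of "eager", "sdpa", "flash_attention_2"
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

inputs = tokenizer("The capital of France is", return_tensors="pt")
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```

Note that, as `MistralModel.forward` above shows, requesting `output_attentions=True` bypasses the SDPA mask preparation and falls back to the 4D causal-mask path.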
venv/lib/python3.10/site-packages/transformers/models/roberta/__init__.py ADDED
@@ -0,0 +1,164 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
29
+ "tokenization_roberta": ["RobertaTokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_roberta"] = [
47
+ "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "RobertaForCausalLM",
49
+ "RobertaForMaskedLM",
50
+ "RobertaForMultipleChoice",
51
+ "RobertaForQuestionAnswering",
52
+ "RobertaForSequenceClassification",
53
+ "RobertaForTokenClassification",
54
+ "RobertaModel",
55
+ "RobertaPreTrainedModel",
56
+ ]
57
+
58
+ try:
59
+ if not is_tf_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ _import_structure["modeling_tf_roberta"] = [
65
+ "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
66
+ "TFRobertaForCausalLM",
67
+ "TFRobertaForMaskedLM",
68
+ "TFRobertaForMultipleChoice",
69
+ "TFRobertaForQuestionAnswering",
70
+ "TFRobertaForSequenceClassification",
71
+ "TFRobertaForTokenClassification",
72
+ "TFRobertaMainLayer",
73
+ "TFRobertaModel",
74
+ "TFRobertaPreTrainedModel",
75
+ ]
76
+
77
+ try:
78
+ if not is_flax_available():
79
+ raise OptionalDependencyNotAvailable()
80
+ except OptionalDependencyNotAvailable:
81
+ pass
82
+ else:
83
+ _import_structure["modeling_flax_roberta"] = [
84
+ "FlaxRobertaForCausalLM",
85
+ "FlaxRobertaForMaskedLM",
86
+ "FlaxRobertaForMultipleChoice",
87
+ "FlaxRobertaForQuestionAnswering",
88
+ "FlaxRobertaForSequenceClassification",
89
+ "FlaxRobertaForTokenClassification",
90
+ "FlaxRobertaModel",
91
+ "FlaxRobertaPreTrainedModel",
92
+ ]
93
+
94
+
95
+ if TYPE_CHECKING:
96
+ from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
97
+ from .tokenization_roberta import RobertaTokenizer
98
+
99
+ try:
100
+ if not is_tokenizers_available():
101
+ raise OptionalDependencyNotAvailable()
102
+ except OptionalDependencyNotAvailable:
103
+ pass
104
+ else:
105
+ from .tokenization_roberta_fast import RobertaTokenizerFast
106
+
107
+ try:
108
+ if not is_torch_available():
109
+ raise OptionalDependencyNotAvailable()
110
+ except OptionalDependencyNotAvailable:
111
+ pass
112
+ else:
113
+ from .modeling_roberta import (
114
+ ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
115
+ RobertaForCausalLM,
116
+ RobertaForMaskedLM,
117
+ RobertaForMultipleChoice,
118
+ RobertaForQuestionAnswering,
119
+ RobertaForSequenceClassification,
120
+ RobertaForTokenClassification,
121
+ RobertaModel,
122
+ RobertaPreTrainedModel,
123
+ )
124
+
125
+ try:
126
+ if not is_tf_available():
127
+ raise OptionalDependencyNotAvailable()
128
+ except OptionalDependencyNotAvailable:
129
+ pass
130
+ else:
131
+ from .modeling_tf_roberta import (
132
+ TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
133
+ TFRobertaForCausalLM,
134
+ TFRobertaForMaskedLM,
135
+ TFRobertaForMultipleChoice,
136
+ TFRobertaForQuestionAnswering,
137
+ TFRobertaForSequenceClassification,
138
+ TFRobertaForTokenClassification,
139
+ TFRobertaMainLayer,
140
+ TFRobertaModel,
141
+ TFRobertaPreTrainedModel,
142
+ )
143
+
144
+ try:
145
+ if not is_flax_available():
146
+ raise OptionalDependencyNotAvailable()
147
+ except OptionalDependencyNotAvailable:
148
+ pass
149
+ else:
150
+ from .modeling_flax_roberta import (
151
+ FlaxRobertaForCausalLM,
152
+ FlaxRobertaForMaskedLM,
153
+ FlaxRobertaForMultipleChoice,
154
+ FlaxRobertaForQuestionAnswering,
155
+ FlaxRobertaForSequenceClassification,
156
+ FlaxRobertaForTokenClassification,
157
+ FlaxRobertaModel,
158
+ FlaxRobertaPreTrainedModel,
159
+ )
160
+
161
+ else:
162
+ import sys
163
+
164
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
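The `_LazyModule` indirection above means none of the torch/TF/Flax backends are imported until an attribute is actually requested. A small sketch of the resulting behaviour, assuming only the PyTorch backend is installed (the tiny config values are arbitrary):

```python
# Hedged sketch: attribute access on transformers.models.roberta resolves the
# real submodule via _LazyModule; optional backends that are not installed are
# simply absent rather than failing at import time.
from transformers.models.roberta import RobertaConfig, RobertaModel

config = RobertaConfig(
    num_hidden_layers=2,      # arbitrary small values for illustration
    hidden_size=64,
    num_attention_heads=2,
    intermediate_size=128,
)
model = RobertaModel(config)  # modeling_roberta is imported only at this point
print(type(model).__name__)   # -> "RobertaModel"
```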
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.46 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/configuration_roberta.cpython-310.pyc ADDED
Binary file (6.6 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.45 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc ADDED
Binary file (34.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc ADDED
Binary file (45 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc ADDED
Binary file (50.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta_fast.cpython-310.pyc ADDED
Binary file (9.54 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/roberta/configuration_roberta.py ADDED
@@ -0,0 +1,154 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ RoBERTa configuration"""
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class RobertaConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`RobertaModel`] or a [`TFRobertaModel`]. It is
34
+ used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture.
35
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa
36
+ [FacebookAI/roberta-base](https://huggingface.co/FacebookAI/roberta-base) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 50265):
44
+ Vocabulary size of the RoBERTa model. Defines the number of different tokens that can be represented by the
45
+ `input_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`].
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`].
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
69
+ The epsilon used by the layer normalization layers.
70
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
71
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
72
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
73
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
74
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
75
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
76
+ is_decoder (`bool`, *optional*, defaults to `False`):
77
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
80
+ relevant if `config.is_decoder=True`.
81
+ classifier_dropout (`float`, *optional*):
82
+ The dropout ratio for the classification head.
83
+
84
+ Examples:
85
+
86
+ ```python
87
+ >>> from transformers import RobertaConfig, RobertaModel
88
+
89
+ >>> # Initializing a RoBERTa configuration
90
+ >>> configuration = RobertaConfig()
91
+
92
+ >>> # Initializing a model (with random weights) from the configuration
93
+ >>> model = RobertaModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "roberta"
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=50265,
104
+ hidden_size=768,
105
+ num_hidden_layers=12,
106
+ num_attention_heads=12,
107
+ intermediate_size=3072,
108
+ hidden_act="gelu",
109
+ hidden_dropout_prob=0.1,
110
+ attention_probs_dropout_prob=0.1,
111
+ max_position_embeddings=512,
112
+ type_vocab_size=2,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ pad_token_id=1,
116
+ bos_token_id=0,
117
+ eos_token_id=2,
118
+ position_embedding_type="absolute",
119
+ use_cache=True,
120
+ classifier_dropout=None,
121
+ **kwargs,
122
+ ):
123
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
124
+
125
+ self.vocab_size = vocab_size
126
+ self.hidden_size = hidden_size
127
+ self.num_hidden_layers = num_hidden_layers
128
+ self.num_attention_heads = num_attention_heads
129
+ self.hidden_act = hidden_act
130
+ self.intermediate_size = intermediate_size
131
+ self.hidden_dropout_prob = hidden_dropout_prob
132
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
133
+ self.max_position_embeddings = max_position_embeddings
134
+ self.type_vocab_size = type_vocab_size
135
+ self.initializer_range = initializer_range
136
+ self.layer_norm_eps = layer_norm_eps
137
+ self.position_embedding_type = position_embedding_type
138
+ self.use_cache = use_cache
139
+ self.classifier_dropout = classifier_dropout
140
+
141
+
142
+ class RobertaOnnxConfig(OnnxConfig):
143
+ @property
144
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
145
+ if self.task == "multiple-choice":
146
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
147
+ else:
148
+ dynamic_axis = {0: "batch", 1: "sequence"}
149
+ return OrderedDict(
150
+ [
151
+ ("input_ids", dynamic_axis),
152
+ ("attention_mask", dynamic_axis),
153
+ ]
154
+ )
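For reference, `RobertaOnnxConfig.inputs` above only declares the dynamic axes for export. A short sketch of what it yields for the default task; the `task` string follows `OnnxConfig`'s default and the printed mapping is the expected shape of the result:

```python
# Hedged sketch of the dynamic-axis mapping produced by RobertaOnnxConfig.inputs.
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

config = RobertaConfig()
onnx_config = RobertaOnnxConfig(config, task="default")
print(onnx_config.inputs)
# Expected output:
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
```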
venv/lib/python3.10/site-packages/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,178 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert RoBERTa checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import pathlib
20
+
21
+ import fairseq
22
+ import torch
23
+ from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
24
+ from fairseq.modules import TransformerSentenceEncoderLayer
25
+ from packaging import version
26
+
27
+ from transformers import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
28
+ from transformers.models.bert.modeling_bert import (
29
+ BertIntermediate,
30
+ BertLayer,
31
+ BertOutput,
32
+ BertSelfAttention,
33
+ BertSelfOutput,
34
+ )
35
+ from transformers.utils import logging
36
+
37
+
38
+ if version.parse(fairseq.__version__) < version.parse("0.9.0"):
39
+ raise Exception("requires fairseq >= 0.9.0")
40
+
41
+
42
+ logging.set_verbosity_info()
43
+ logger = logging.get_logger(__name__)
44
+
45
+ SAMPLE_TEXT = "Hello world! cécé herlolip"
46
+
47
+
48
+ def convert_roberta_checkpoint_to_pytorch(
49
+ roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
50
+ ):
51
+ """
52
+ Copy/paste/tweak roberta's weights to our BERT structure.
53
+ """
54
+ roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
55
+ roberta.eval() # disable dropout
56
+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder
57
+ config = RobertaConfig(
58
+ vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
59
+ hidden_size=roberta.args.encoder_embed_dim,
60
+ num_hidden_layers=roberta.args.encoder_layers,
61
+ num_attention_heads=roberta.args.encoder_attention_heads,
62
+ intermediate_size=roberta.args.encoder_ffn_embed_dim,
63
+ max_position_embeddings=514,
64
+ type_vocab_size=1,
65
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
66
+ )
67
+ if classification_head:
68
+ config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
69
+ print("Our BERT config:", config)
70
+
71
+ model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
72
+ model.eval()
73
+
74
+ # Now let's copy all the weights.
75
+ # Embeddings
76
+ model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
77
+ model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
78
+ model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
79
+ model.roberta.embeddings.token_type_embeddings.weight
80
+ ) # just zero them out b/c RoBERTa doesn't use them.
81
+ model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
82
+ model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
83
+
84
+ for i in range(config.num_hidden_layers):
85
+ # Encoder: start of layer
86
+ layer: BertLayer = model.roberta.encoder.layer[i]
87
+ roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
88
+
89
+ # self attention
90
+ self_attn: BertSelfAttention = layer.attention.self
91
+ assert (
92
+ roberta_layer.self_attn.k_proj.weight.data.shape
93
+ == roberta_layer.self_attn.q_proj.weight.data.shape
94
+ == roberta_layer.self_attn.v_proj.weight.data.shape
95
+ == torch.Size((config.hidden_size, config.hidden_size))
96
+ )
97
+
98
+ self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
99
+ self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
100
+ self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
101
+ self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
102
+ self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
103
+ self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
104
+
105
+ # self-attention output
106
+ self_output: BertSelfOutput = layer.attention.output
107
+ assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
108
+ self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
109
+ self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
110
+ self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
111
+ self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
112
+
113
+ # intermediate
114
+ intermediate: BertIntermediate = layer.intermediate
115
+ assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
116
+ intermediate.dense.weight = roberta_layer.fc1.weight
117
+ intermediate.dense.bias = roberta_layer.fc1.bias
118
+
119
+ # output
120
+ bert_output: BertOutput = layer.output
121
+ assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
122
+ bert_output.dense.weight = roberta_layer.fc2.weight
123
+ bert_output.dense.bias = roberta_layer.fc2.bias
124
+ bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
125
+ bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
126
+ # end of layer
127
+
128
+ if classification_head:
129
+ model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
130
+ model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
131
+ model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
132
+ model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
133
+ else:
134
+ # LM Head
135
+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
136
+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
137
+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
138
+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
139
+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
140
+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
141
+
142
+ # Let's check that we get the same results.
143
+ input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
144
+
145
+ our_output = model(input_ids)[0]
146
+ if classification_head:
147
+ their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
148
+ else:
149
+ their_output = roberta.model(input_ids)[0]
150
+ print(our_output.shape, their_output.shape)
151
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
152
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
153
+ success = torch.allclose(our_output, their_output, atol=1e-3)
154
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
155
+ if not success:
156
+ raise Exception("Something went wRoNg")
157
+
158
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
159
+ print(f"Saving model to {pytorch_dump_folder_path}")
160
+ model.save_pretrained(pytorch_dump_folder_path)
161
+
162
+
163
+ if __name__ == "__main__":
164
+ parser = argparse.ArgumentParser()
165
+ # Required parameters
166
+ parser.add_argument(
167
+ "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
168
+ )
169
+ parser.add_argument(
170
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
171
+ )
172
+ parser.add_argument(
173
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
174
+ )
175
+ args = parser.parse_args()
176
+ convert_roberta_checkpoint_to_pytorch(
177
+ args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
178
+ )
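The script above is normally run from the command line via its `argparse` entry point, but the conversion function can also be called directly. A hypothetical invocation, with placeholder paths and fairseq >= 0.9.0 installed:

```python
# Hypothetical direct call of the conversion helper defined above; both paths
# are placeholders and must point at a real fairseq RoBERTa checkpoint and an
# output directory of your choosing.
from transformers.models.roberta.convert_roberta_original_pytorch_checkpoint_to_pytorch import (
    convert_roberta_checkpoint_to_pytorch,
)

convert_roberta_checkpoint_to_pytorch(
    roberta_checkpoint_path="/path/to/fairseq/roberta.base",     # placeholder
    pytorch_dump_folder_path="/path/to/output/roberta-base-hf",  # placeholder
    classification_head=False,  # set True to also convert the MNLI head
)
```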