applied-ai-018 committed on
Commit 6881e21 · verified · 1 Parent(s): f642bd9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. ckpts/universal/global_step20/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/8.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/9.attention.dense.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step20/zero/9.attention.dense.weight/fp32.pt +3 -0
  6. lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/config.yaml +43 -0
  7. lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log +43 -0
  8. lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/requirements.txt +155 -0
  9. lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-summary.json +1 -0
  10. venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 +3 -0
  11. venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/blip/__init__.py +127 -0
  13. venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/configuration_blip.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/convert_blip_original_pytorch_to_hf.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/blip/configuration_blip.py +365 -0
  19. venv/lib/python3.10/site-packages/transformers/models/blip/convert_blip_original_pytorch_to_hf.py +191 -0
  20. venv/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py +312 -0
  21. venv/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py +1433 -0
  22. venv/lib/python3.10/site-packages/transformers/models/blip/modeling_blip_text.py +945 -0
  23. venv/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py +1701 -0
  24. venv/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip_text.py +1122 -0
  25. venv/lib/python3.10/site-packages/transformers/models/blip/processing_blip.py +150 -0
  26. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py +325 -0
  27. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/feature_extraction_conditional_detr.py +43 -0
  28. venv/lib/python3.10/site-packages/transformers/models/funnel/__init__.py +134 -0
  29. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/__init__.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/configuration_funnel.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/convert_funnel_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_funnel.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_tf_funnel.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel_fast.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py +166 -0
  37. venv/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py +65 -0
  38. venv/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py +1599 -0
  39. venv/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py +1871 -0
  40. venv/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel.py +534 -0
  41. venv/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py +200 -0
  42. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__init__.py +80 -0
  43. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/configuration_gpt_neox.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/modeling_gpt_neox.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/tokenization_gpt_neox_fast.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/configuration_gpt_neox.py +179 -0
  48. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py +1426 -0
  49. venv/lib/python3.10/site-packages/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py +243 -0
  50. venv/lib/python3.10/site-packages/transformers/models/layoutlmv2/__init__.py +104 -0
.gitattributes CHANGED
@@ -154,3 +154,4 @@ venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step20/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:720c3df14f634d26021cf13036bc0b93057d8d7b12d1c20401f541bbc6f7f4c4
+ size 33555612
ckpts/universal/global_step20/zero/8.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8e0933f2a678682b6c890451974e29399473898b6196dfb5e9fbd1bde1c05e8
+ size 33555533
ckpts/universal/global_step20/zero/9.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fa01713ce86d2fdb91f0be217ea01bd94d27db97cb444e7ee2a7c173058cce4
+ size 16778396
ckpts/universal/global_step20/zero/9.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d48f8bb33a9f2794437c9843a070066793eed3341e5d3c8fbf3c07c083d1b4a
+ size 16778317
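The four checkpoint entries above are Git LFS pointer files: the repository only records a `version` line, the `oid sha256` of the blob, and its `size` in bytes, while the tensor data itself lives in LFS storage until `git lfs pull` is run. A minimal sketch of reading such a pointer from a checkout, assuming the path from this commit (the helper name is illustrative):

```python
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Until `git lfs pull` runs, this file on disk is only the three-line pointer shown above.
ptr = read_lfs_pointer("ckpts/universal/global_step20/zero/9.attention.dense.weight/fp32.pt")
print(ptr["oid"], int(ptr["size"]))  # sha256:2d48f8... 16778317
```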
lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.41.0
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1716396347
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.41.0
+       8:
+       - 5
+       13: linux-x86_64
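The config only captures run metadata: the Python, wandb CLI, and transformers versions, plus wandb telemetry codes under `_wandb.t`. A small sketch of pulling those versions back out with PyYAML, assuming the file path shown above:

```python
import yaml

with open("lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/config.yaml") as f:
    cfg = yaml.safe_load(f)

meta = cfg["_wandb"]["value"]
print(meta["python_version"], meta["cli_version"], meta["huggingface_version"])
# 3.10.12 0.17.0 4.41.0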
lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log ADDED
@@ -0,0 +1,43 @@
+
+ 2024-05-22:16:45:48,596 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-22:16:45:57,099 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-22:16:45:57,100 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-22:16:45:57,100 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'}
+ 2024-05-22:16:45:59,412 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 399, in cached_file
+     resolved_file = hf_hub_download(
+   File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
+     validate_repo_id(arg_value)
+   File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
+     raise HFValidationError(
+ huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'. Use `repo_type` argument if needed.
+ The above exception was the direct cause of the following exception:
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 463, in cached_file
+     raise EnvironmentError(
+ OSError: Incorrect path_or_model_id: '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
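The run fails before any task is evaluated: the `pretrained` argument points at a local directory that apparently does not exist (note the doubled slashes), so transformers falls back to treating the string as a Hub repo id and `validate_repo_id` rejects it. A hedged pre-flight check along these lines would surface the problem before launching the harness; `ckpt` is the path taken from the log, and the final print is only illustrative of how the argument would be passed:

```python
import os

ckpt = "/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000"
ckpt = os.path.normpath(ckpt)  # collapses the doubled slashes

# A local checkpoint must be a directory containing config.json; otherwise the string
# is interpreted as a Hub repo id and fails with the HFValidationError shown above.
if not os.path.isfile(os.path.join(ckpt, "config.json")):
    raise FileNotFoundError(f"No HF checkpoint found at {ckpt}")

print(f"--model_args pretrained={ckpt}")
```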
lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ sentry-sdk==2.2.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.0
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
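The pin list mixes plain CPython wheels with Habana/Gaudi builds (`deepspeed==0.12.4+hpu.synapse.v1.15.1`, `torch==2.2.0a0+git8964477`, the `habana-*` packages) and repeats a few pins (`lm_eval`, `pip`, `setuptools`, `wheel`), so it documents the running environment rather than a cleanly installable spec. A small sketch for auditing an existing environment against these pins, assuming the requirements path above; duplicates collapse to the last occurrence:

```python
from importlib.metadata import version, PackageNotFoundError

pins = {}
with open("lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/requirements.txt") as f:
    for line in f:
        name, _, pinned = line.strip().partition("==")
        if name:
            pins[name] = pinned  # later duplicate pins overwrite earlier ones

for name, pinned in sorted(pins.items()):
    try:
        installed = version(name)
    except PackageNotFoundError:
        installed = "<missing>"
    if installed != pinned:
        print(f"{name}: pinned {pinned}, installed {installed}")
```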
lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 11}}
venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0768f3ee319e217e961baa1b4084a7d52900f0881c0364c371308bb1f58a226
+ size 132457288
venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.04 kB).
venv/lib/python3.10/site-packages/transformers/models/blip/__init__.py ADDED
@@ -0,0 +1,127 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_tf_available,
+     is_torch_available,
+     is_vision_available,
+ )
+
+
+ _import_structure = {
+     "configuration_blip": [
+         "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "BlipConfig",
+         "BlipTextConfig",
+         "BlipVisionConfig",
+     ],
+     "processing_blip": ["BlipProcessor"],
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
+
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_blip"] = [
+         "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "BlipModel",
+         "BlipPreTrainedModel",
+         "BlipForConditionalGeneration",
+         "BlipForQuestionAnswering",
+         "BlipVisionModel",
+         "BlipTextModel",
+         "BlipForImageTextRetrieval",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_blip"] = [
+         "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFBlipModel",
+         "TFBlipPreTrainedModel",
+         "TFBlipForConditionalGeneration",
+         "TFBlipForQuestionAnswering",
+         "TFBlipVisionModel",
+         "TFBlipTextModel",
+         "TFBlipForImageTextRetrieval",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
+     from .processing_blip import BlipProcessor
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .image_processing_blip import BlipImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_blip import (
+             BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+             BlipForConditionalGeneration,
+             BlipForImageTextRetrieval,
+             BlipForQuestionAnswering,
+             BlipModel,
+             BlipPreTrainedModel,
+             BlipTextModel,
+             BlipVisionModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_blip import (
+             TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFBlipForConditionalGeneration,
+             TFBlipForImageTextRetrieval,
+             TFBlipForQuestionAnswering,
+             TFBlipModel,
+             TFBlipPreTrainedModel,
+             TFBlipTextModel,
+             TFBlipVisionModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
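The module builds `_import_structure` up front and hands it to `_LazyModule`, so `transformers.models.blip` only imports its torch, TF, or vision submodules when one of the listed names is actually accessed. A minimal, offline-safe sketch of what that enables, assuming torch is installed (the weights are randomly initialised; nothing is downloaded):

```python
from transformers import BlipConfig, BlipModel  # names are resolved lazily on first access

config = BlipConfig()        # default text + vision sub-configs
model = BlipModel(config)    # randomly initialised, no Hub download
print(type(model).__name__, config.projection_dim)  # BlipModel 512
```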
venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.89 kB).
venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/configuration_blip.cpython-310.pyc ADDED
Binary file (14.2 kB).
venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/convert_blip_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (4.72 kB).
venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip.cpython-310.pyc ADDED
Binary file (48.5 kB).
venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc ADDED
Binary file (27.5 kB).
venv/lib/python3.10/site-packages/transformers/models/blip/configuration_blip.py ADDED
@@ -0,0 +1,365 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Blip model configuration"""
+
+ import os
+ from typing import Union
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class BlipTextConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a BLIP
+     text model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the `BlipText` used by the [base
+     architectures](https://huggingface.co/Salesforce/blip-vqa-base).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 30524):
+             Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`BlipModel`].
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         encoder_hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers from the vision model.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         bos_token_id (`int`, *optional*, defaults to 30522):
+             The id of the `beginning-of-sequence` token.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the `end-of-sequence` token.
+         pad_token_id (`int`, *optional*, defaults to 0):
+             The id of the `padding` token.
+         sep_token_id (`int`, *optional*, defaults to 102):
+             The id of the `separator` token.
+         is_decoder (`bool`, *optional*, defaults to `True`):
+             Whether the model is used as a decoder.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+         label_smoothing (float, *optional*):
+             A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets
+             become a mixture of the original ground truth and a uniform distribution as described in
+             `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
+
+     Example:
+
+     ```python
+     >>> from transformers import BlipTextConfig, BlipTextModel
+
+     >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration
+     >>> configuration = BlipTextConfig()
+
+     >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration
+     >>> model = BlipTextModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "blip_text_model"
+
+     def __init__(
+         self,
+         vocab_size=30524,
+         hidden_size=768,
+         encoder_hidden_size=768,
+         intermediate_size=3072,
+         projection_dim=768,
+         num_hidden_layers=12,
+         num_attention_heads=8,
+         max_position_embeddings=512,
+         hidden_act="gelu",
+         layer_norm_eps=1e-12,
+         hidden_dropout_prob=0.0,
+         attention_probs_dropout_prob=0.0,
+         initializer_range=0.02,
+         bos_token_id=30522,
+         eos_token_id=2,
+         pad_token_id=0,
+         sep_token_id=102,
+         is_decoder=True,
+         use_cache=True,
+         label_smoothing=0.0,
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             sep_token_id=sep_token_id,
+             **kwargs,
+         )
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.encoder_hidden_size = encoder_hidden_size
+         self.intermediate_size = intermediate_size
+         self.projection_dim = projection_dim
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.max_position_embeddings = max_position_embeddings
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.is_decoder = is_decoder
+         self.use_cache = use_cache
+         self.label_smoothing = label_smoothing
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # get the text config dict if we are loading from BlipConfig
+         if config_dict.get("model_type") == "blip":
+             config_dict = config_dict["text_config"]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
+ class BlipVisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate a
+     BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration defaults will yield a similar configuration to that of the Blip-base
+     [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         image_size (`int`, *optional*, defaults to 384):
+             The size (resolution) of each image.
+         patch_size (`int`, *optional*, defaults to 16):
+             The size (resolution) of each patch.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+             The epsilon used by the layer normalization layers.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 1e-10):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+     Example:
+
+     ```python
+     >>> from transformers import BlipVisionConfig, BlipVisionModel
+
+     >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration
+     >>> configuration = BlipVisionConfig()
+
+     >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration
+     >>> model = BlipVisionModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "blip_vision_model"
+
+     def __init__(
+         self,
+         hidden_size=768,
+         intermediate_size=3072,
+         projection_dim=512,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         image_size=384,
+         patch_size=16,
+         hidden_act="gelu",
+         layer_norm_eps=1e-5,
+         attention_dropout=0.0,
+         initializer_range=1e-10,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.projection_dim = projection_dim
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.patch_size = patch_size
+         self.image_size = image_size
+         self.initializer_range = initializer_range
+         self.attention_dropout = attention_dropout
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # get the vision config dict if we are loading from BlipConfig
+         if config_dict.get("model_type") == "blip":
+             config_dict = config_dict["vision_config"]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
+ class BlipConfig(PretrainedConfig):
+     r"""
+     [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate
+     a BLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
+     a configuration with the defaults will yield a similar configuration to that of the BLIP-base
+     [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         text_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`BlipTextConfig`].
+         vision_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`BlipVisionConfig`].
+         projection_dim (`int`, *optional*, defaults to 512):
+             Dimentionality of text and vision projection layers.
+         logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+             The inital value of the *logit_scale* paramter. Default is used as per the original BLIP implementation.
+         image_text_hidden_size (`int`, *optional*, defaults to 256):
+             Dimentionality of the hidden state of the image-text fusion layer.
+         label_smoothing (float, optional, *optional*, defaults to 0.0):
+             A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets
+             become a mixture of the original ground truth and a uniform distribution as described in
+             `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
+         kwargs (*optional*):
+             Dictionary of keyword arguments.
+
+     Example:
+
+     ```python
+     >>> from transformers import BlipConfig, BlipModel
+
+     >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration
+     >>> configuration = BlipConfig()
+
+     >>> # Initializing a BlipPModel (with random weights) from the Salesforce/blip-vqa-base style configuration
+     >>> model = BlipModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+
+     >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig
+
+     >>> # Initializing a BLIPText and BLIPVision configuration
+     >>> config_text = BlipTextConfig()
+     >>> config_vision = BlipVisionConfig()
+
+     >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision)
+     ```"""
+
+     model_type = "blip"
+
+     def __init__(
+         self,
+         text_config=None,
+         vision_config=None,
+         projection_dim=512,
+         logit_scale_init_value=2.6592,
+         image_text_hidden_size=256,
+         label_smoothing=0.0,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         if text_config is None:
+             text_config = {}
+             logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
+
+         self.text_config = BlipTextConfig(**text_config)
+         self.vision_config = BlipVisionConfig(**vision_config)
+
+         self.text_config.encoder_hidden_size = self.vision_config.hidden_size
+
+         self.projection_dim = projection_dim
+         self.logit_scale_init_value = logit_scale_init_value
+         self.initializer_factor = 1.0
+         self.initializer_range = 0.02
+         self.image_text_hidden_size = image_text_hidden_size
+         self.label_smoothing = label_smoothing
+
+     @classmethod
+     def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
+         r"""
+         Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model
+         configuration.
+
+         Returns:
+             [`BlipConfig`]: An instance of a configuration object
+         """
+
+         return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
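One detail worth noting in `BlipConfig.__init__` above is that the text config's `encoder_hidden_size` is overwritten with the vision config's `hidden_size`, which keeps the text model's cross-attention width in sync with the vision encoder. A short sketch mirroring the `from_text_vision_configs` example in the docstring (the constructor arguments are just illustrative values):

```python
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_cfg = BlipTextConfig(encoder_hidden_size=1024)   # will be overridden below
vision_cfg = BlipVisionConfig(hidden_size=768)

config = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(config.text_config.encoder_hidden_size)  # 768, tied to vision_config.hidden_size
```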
venv/lib/python3.10/site-packages/transformers/models/blip/convert_blip_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,191 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import re
+
+ import requests
+ import torch
+
+ # git clone https://github.com/salesforce/BLIP.git
+ from models.blip import blip_decoder
+ from models.blip_itm import blip_itm
+ from models.blip_vqa import blip_vqa
+ from PIL import Image
+ from torchvision import transforms
+ from torchvision.transforms.functional import InterpolationMode
+
+ from transformers import (
+     BertTokenizer,
+     BlipConfig,
+     BlipForConditionalGeneration,
+     BlipForImageTextRetrieval,
+     BlipForQuestionAnswering,
+ )
+
+
+ def load_demo_image(image_size, device):
+     img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
+     raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
+
+     transform = transforms.Compose(
+         [
+             transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
+             transforms.ToTensor(),
+             transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
+         ]
+     )
+     image = transform(raw_image).unsqueeze(0).to(device)
+     return image
+
+
+ def rename_key(key):
+     if "visual_encoder" in key:
+         key = re.sub("visual_encoder*", "vision_model.encoder", key)
+     if "blocks" in key:
+         key = re.sub(r"blocks", "layers", key)
+     if "attn" in key:
+         key = re.sub(r"attn", "self_attn", key)
+     if "norm1" in key:
+         key = re.sub(r"norm1", "layer_norm1", key)
+     if "norm2" in key:
+         key = re.sub(r"norm2", "layer_norm2", key)
+     if "encoder.norm" in key:
+         key = re.sub(r"encoder.norm", "post_layernorm", key)
+     if "encoder.patch_embed.proj" in key:
+         key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
+
+     if "encoder.pos_embed" in key:
+         key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
+     if "encoder.cls_token" in key:
+         key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
+
+     if "self_attn" in key:
+         key = re.sub(r"self_attn.proj", "self_attn.projection", key)
+
+     return key
+
+
+ @torch.no_grad()
+ def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
+     """
+     Copy/paste/tweak model's weights to transformers design.
+     """
+     if config_path is not None:
+         config = BlipConfig.from_pretrained(config_path)
+     else:
+         config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
+
+     hf_model = BlipForConditionalGeneration(config).eval()
+
+     model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
+
+     pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
+     pt_model = pt_model.eval()
+
+     modified_state_dict = pt_model.state_dict()
+     for key in modified_state_dict.copy():
+         value = modified_state_dict.pop(key)
+         renamed_key = rename_key(key)
+         modified_state_dict[renamed_key] = value
+
+     hf_model.load_state_dict(modified_state_dict)
+
+     image_size = 384
+     image = load_demo_image(image_size=image_size, device="cpu")
+     tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
+     input_ids = tokenizer(["a picture of"]).input_ids
+
+     out = hf_model.generate(image, input_ids)
+
+     assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
+
+     out = hf_model.generate(image)
+
+     assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
+
+     if pytorch_dump_folder_path is not None:
+         hf_model.save_pretrained(pytorch_dump_folder_path)
+
+     # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
+     model_url = (
+         "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
+     )
+
+     vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
+     vqa_model.eval()
+
+     modified_state_dict = vqa_model.state_dict()
+     for key in modified_state_dict.copy():
+         value = modified_state_dict.pop(key)
+         renamed_key = rename_key(key)
+         modified_state_dict[renamed_key] = value
+
+     hf_vqa_model = BlipForQuestionAnswering(config)
+
+     hf_vqa_model.load_state_dict(modified_state_dict)
+
+     question = ["How many dogs are in this image?"]
+     question_input_ids = tokenizer(question, return_tensors="pt").input_ids
+
+     answer = hf_vqa_model.generate(question_input_ids, image)
+     print(tokenizer.decode(answer[0]))
+
+     assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
+     if pytorch_dump_folder_path is not None:
+         hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
+
+     model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
+
+     itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
+     itm_model.eval()
+
+     modified_state_dict = itm_model.state_dict()
+     for key in modified_state_dict.copy():
+         value = modified_state_dict.pop(key)
+         renamed_key = rename_key(key)
+         modified_state_dict[renamed_key] = value
+
+     hf_itm_model = BlipForImageTextRetrieval(config)
+
+     question = ["A picture of a woman with a dog sitting in a beach"]
+     question_input_ids = tokenizer(
+         question,
+         return_tensors="pt",
+         padding="max_length",
+         truncation=True,
+         max_length=35,
+     ).input_ids
+
+     hf_itm_model.load_state_dict(modified_state_dict)
+     hf_itm_model.eval()
+
+     out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
+     out = hf_itm_model(question_input_ids, image, use_itm_head=False)
+
+     assert out[0].item() == 0.2110687494277954
+     assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
+
+     if pytorch_dump_folder_path is not None:
+         hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+     parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+     args = parser.parse_args()
+
+     convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
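The heart of the conversion is `rename_key`, which rewrites the original BLIP state-dict names into the layout the HF modules expect (ViT `blocks` become `layers`, `attn` becomes `self_attn`, the patch/position embeddings move under `embeddings`, and so on). A standalone illustration on a couple of made-up keys, trimmed to the substitutions exercised here (the key strings are hypothetical examples, not taken from a real checkpoint):

```python
import re

def rename_key(key):
    # Same substitutions as in the script above, trimmed to the ones exercised below.
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    return key

print(rename_key("visual_encoder.blocks.0.attn.qkv.weight"))
# vision_model.encoder.layers.0.self_attn.qkv.weight
print(rename_key("visual_encoder.blocks.3.norm1.bias"))
# vision_model.encoder.layers.3.layer_norm1.bias
```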
venv/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py ADDED
@@ -0,0 +1,312 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Image processor class for BLIP."""
+
+ from typing import Dict, List, Optional, Union
+
+ import numpy as np
+
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+ from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
+ from ...image_utils import (
+     OPENAI_CLIP_MEAN,
+     OPENAI_CLIP_STD,
+     ChannelDimension,
+     ImageInput,
+     PILImageResampling,
+     infer_channel_dimension_format,
+     is_scaled_image,
+     make_list_of_images,
+     to_numpy_array,
+     valid_images,
+     validate_kwargs,
+     validate_preprocess_arguments,
+ )
+ from ...utils import TensorType, is_vision_available, logging
+
+
+ if is_vision_available():
+     import PIL
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class BlipImageProcessor(BaseImageProcessor):
+     r"""
+     Constructs a BLIP image processor.
+
+     Args:
+         do_resize (`bool`, *optional*, defaults to `True`):
+             Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+             `do_resize` parameter in the `preprocess` method.
+         size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
+             Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+             method.
+         resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+             Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
+             overridden by the `resample` parameter in the `preprocess` method.
+         do_rescale (`bool`, *optional*, defaults to `True`):
+             Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+             `do_rescale` parameter in the `preprocess` method.
+         rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+             Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
+             overridden by the `rescale_factor` parameter in the `preprocess` method.
+         do_normalize (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+             method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
+         image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+             Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+             channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
+             overridden by the `image_mean` parameter in the `preprocess` method.
+         image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+             Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+             number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+             Can be overridden by the `image_std` parameter in the `preprocess` method.
+         do_convert_rgb (`bool`, *optional*, defaults to `True`):
+             Whether to convert the image to RGB.
+     """
+
+     model_input_names = ["pixel_values"]
+
+     def __init__(
+         self,
+         do_resize: bool = True,
+         size: Dict[str, int] = None,
+         resample: PILImageResampling = PILImageResampling.BICUBIC,
+         do_rescale: bool = True,
+         rescale_factor: Union[int, float] = 1 / 255,
+         do_normalize: bool = True,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         do_convert_rgb: bool = True,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         size = size if size is not None else {"height": 384, "width": 384}
+         size = get_size_dict(size, default_to_square=True)
+
+         self.do_resize = do_resize
+         self.size = size
+         self.resample = resample
+         self.do_rescale = do_rescale
+         self.rescale_factor = rescale_factor
+         self.do_normalize = do_normalize
+         self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+         self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+         self.do_convert_rgb = do_convert_rgb
+         self._valid_processor_keys = [
+             "images",
+             "do_resize",
+             "size",
+             "resample",
+             "do_rescale",
+             "rescale_factor",
+             "do_normalize",
+             "image_mean",
+             "image_std",
+             "do_convert_rgb",
+             "return_tensors",
+             "data_format",
+             "input_data_format",
+         ]
+
+     # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
+     def resize(
+         self,
+         image: np.ndarray,
+         size: Dict[str, int],
+         resample: PILImageResampling = PILImageResampling.BICUBIC,
+         data_format: Optional[Union[str, ChannelDimension]] = None,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> np.ndarray:
+         """
+         Resize an image to `(size["height"], size["width"])`.
+
+         Args:
+             image (`np.ndarray`):
+                 Image to resize.
+             size (`Dict[str, int]`):
+                 Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+             resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                 `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
+             data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the output image. If unset, the channel dimension format of the input
+                 image is used. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                 from the input image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+         Returns:
+             `np.ndarray`: The resized image.
+         """
+         size = get_size_dict(size)
+         if "height" not in size or "width" not in size:
+             raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+         output_size = (size["height"], size["width"])
+         return resize(
+             image,
+             size=output_size,
+             resample=resample,
+             data_format=data_format,
+             input_data_format=input_data_format,
+             **kwargs,
+         )
+
+     def preprocess(
+         self,
+         images: ImageInput,
+         do_resize: Optional[bool] = None,
+         size: Optional[Dict[str, int]] = None,
+         resample: PILImageResampling = None,
+         do_rescale: Optional[bool] = None,
+         rescale_factor: Optional[float] = None,
+         do_normalize: Optional[bool] = None,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         do_convert_rgb: bool = None,
+         data_format: ChannelDimension = ChannelDimension.FIRST,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> PIL.Image.Image:
+         """
+         Preprocess an image or batch of images.
+
+         Args:
+             images (`ImageInput`):
+                 Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                 passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+             do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                 Whether to resize the image.
+             size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                 Controls the size of the image after `resize`. The shortest edge of the image is resized to
+                 `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
+                 is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
+                 edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
+             resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+                 Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
+             do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                 Whether to rescale the image values between [0 - 1].
+             rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                 Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+             do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                 Whether to normalize the image.
+             image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                 Image mean to normalize the image by if `do_normalize` is set to `True`.
+             image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                 Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
+             do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                 Whether to convert the image to RGB.
+             return_tensors (`str` or `TensorType`, *optional*):
+                 The type of tensors to return. Can be one of:
+                 - Unset: Return a list of `np.ndarray`.
+                 - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                 - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                 - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                 - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+             data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                 The channel dimension format for the output image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - Unset: Use the channel dimension format of the input image.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                 from the input image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+         """
+         do_resize = do_resize if do_resize is not None else self.do_resize
+         resample = resample if resample is not None else self.resample
+         do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+         rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+         do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+         image_mean = image_mean if image_mean is not None else self.image_mean
+         image_std = image_std if image_std is not None else self.image_std
+         do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+         size = size if size is not None else self.size
+         size = get_size_dict(size, default_to_square=False)
+
+         images = make_list_of_images(images)
+
+         validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+         if not valid_images(images):
+             raise ValueError(
+                 "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                 "torch.Tensor, tf.Tensor or jax.ndarray."
+             )
+
+         validate_preprocess_arguments(
+             do_rescale=do_rescale,
+             rescale_factor=rescale_factor,
+             do_normalize=do_normalize,
+             image_mean=image_mean,
+             image_std=image_std,
+             do_resize=do_resize,
+             size=size,
+             resample=resample,
+         )
+         # PIL RGBA images are converted to RGB
272
+ if do_convert_rgb:
273
+ images = [convert_to_rgb(image) for image in images]
274
+
275
+ # All transformations expect numpy arrays.
276
+ images = [to_numpy_array(image) for image in images]
277
+
278
+ if is_scaled_image(images[0]) and do_rescale:
279
+ logger.warning_once(
280
+ "It looks like you are trying to rescale already rescaled images. If the input"
281
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
282
+ )
283
+
284
+ if input_data_format is None:
285
+ # We assume that all images have the same channel dimension format.
286
+ input_data_format = infer_channel_dimension_format(images[0])
287
+
288
+ if do_resize:
289
+ images = [
290
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
291
+ for image in images
292
+ ]
293
+
294
+ if do_rescale:
295
+ images = [
296
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
297
+ for image in images
298
+ ]
299
+
300
+ if do_normalize:
301
+ images = [
302
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
303
+ for image in images
304
+ ]
305
+
306
+ images = [
307
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
308
+ ]
309
+
310
+ encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
311
+
312
+ return encoded_outputs
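As a hedged usage sketch of the `preprocess` pipeline defined above (the processor is loaded via `AutoImageProcessor`, and the checkpoint name and image URL are the ones used in this repository's doc examples; the exact output shape depends on the checkpoint's configured `size`):

```python
from PIL import Image
import requests
from transformers import AutoImageProcessor

# Assumed checkpoint; BLIP checkpoints resolve to this BLIP image processor.
processor = AutoImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Resize to (height, width), rescale to [0, 1], normalize, and return PyTorch tensors.
batch = processor.preprocess(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 384, 384]) with a default 384x384 size
```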
venv/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py ADDED
@@ -0,0 +1,1433 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BLIP model."""
16
+
17
+ import warnings
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn.functional import normalize
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...utils import (
30
+ ModelOutput,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
37
+ from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
43
+
44
+
45
+ from ..deprecated._archive_maps import BLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
46
+
47
+
48
+ # Copied from transformers.models.clip.modeling_clip.contrastive_loss
49
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
50
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
51
+
52
+
53
+ # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->blip
54
+ def blip_loss(similarity: torch.Tensor) -> torch.Tensor:
55
+ caption_loss = contrastive_loss(similarity)
56
+ image_loss = contrastive_loss(similarity.t())
57
+ return (caption_loss + image_loss) / 2.0
58
+
59
+
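As a quick, hedged illustration of the two loss helpers above: `blip_loss` treats the i-th caption and the i-th image in a batch as the matching pair, so both cross-entropy terms use the targets `0..batch_size-1`. A minimal standalone sketch (the random similarity matrix is purely illustrative):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
similarity = torch.randn(4, 4)  # (text_batch_size, image_batch_size) similarity logits

targets = torch.arange(4)                               # matching pairs sit on the diagonal
caption_loss = F.cross_entropy(similarity, targets)     # text -> image direction
image_loss = F.cross_entropy(similarity.t(), targets)   # image -> text direction

loss = (caption_loss + image_loss) / 2.0                # equals blip_loss(similarity)
print(loss)
```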
60
+ @dataclass
61
+ class BlipForConditionalGenerationModelOutput(ModelOutput):
62
+ """
63
+ Adapted from the base class for vision model outputs. In addition, it contains the image embeddings obtained by
64
+ pooling the last hidden states, as well as the loss term from the text decoder.
65
+
66
+ Args:
67
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
68
+ Language modeling loss from the text decoder.
69
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
70
+ Prediction scores of the language modeling head of the text decoder model.
71
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
72
+ The image embeddings obtained after applying the Vision Transformer model to the input image.
73
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
74
+ Sequence of hidden-states at the output of the last layer of the model.
75
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
76
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
77
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
78
+
79
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
80
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
81
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
82
+ sequence_length)`.
83
+
84
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
85
+ heads.
86
+ """
87
+
88
+ loss: Optional[Tuple[torch.FloatTensor]] = None
89
+ logits: Optional[Tuple[torch.FloatTensor]] = None
90
+ image_embeds: Optional[torch.FloatTensor] = None
91
+ last_hidden_state: torch.FloatTensor = None
92
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
93
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
94
+
95
+ @property
96
+ def decoder_logits(self):
97
+ warnings.warn(
98
+ "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
99
+ " Please use the `logits` attribute to retrieve the final output instead.",
100
+ FutureWarning,
101
+ )
102
+ return self.logits
103
+
104
+
105
+ @dataclass
106
+ class BlipTextVisionModelOutput(ModelOutput):
107
+ """
108
+ Adapted from the base class for vision model outputs. In addition, it contains the image embeddings obtained by
109
+ pooling the last hidden states, as well as the loss term from the text decoder.
110
+
111
+ Args:
112
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
113
+ Language modeling loss from the text decoder.
114
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
115
+ The image embeddings obtained by applying the projection layer to the pooler_output.
116
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
117
+ Sequence of hidden-states at the output of the last layer of the model.
118
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
119
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
120
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
121
+
122
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
123
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
124
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
125
+ sequence_length)`.
126
+
127
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
128
+ heads.
129
+ """
130
+
131
+ loss: Optional[torch.FloatTensor] = None
132
+ image_embeds: Optional[torch.FloatTensor] = None
133
+ last_hidden_state: torch.FloatTensor = None
134
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
135
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
136
+
137
+
138
+ @dataclass
139
+ class BlipImageTextMatchingModelOutput(ModelOutput):
140
+ """
141
+ Adapted from the base class for vision model outputs. In addition, it contains the image embeddings obtained by
142
+ pooling the last hidden states, the loss term from the text decoder, and the image-text similarity
143
+ scores.
144
+
145
+ Args:
146
+ itm_score (`torch.FloatTensor`):
147
+ The image-text similarity scores.
148
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
149
+ Language modeling loss from the text decoder.
150
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
151
+ The image embeddings obtained by applying the projection layer to the pooler_output.
152
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
153
+ Sequence of hidden-states at the output of the last layer of the model.
154
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
155
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
156
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
157
+
158
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
159
+ vision_pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*):
160
+ Last layer hidden-state of the vision-only branch of the model.
161
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
162
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
163
+ sequence_length)`.
164
+
165
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
166
+ heads.
167
+ question_embeds (`torch.FloatTensor`):
168
+ The question embeddings obtained by the text projection layer.
169
+ """
170
+
171
+ itm_score: Optional[torch.FloatTensor] = None
172
+ loss: Optional[torch.FloatTensor] = None
173
+ image_embeds: Optional[torch.FloatTensor] = None
174
+ last_hidden_state: torch.FloatTensor = None
175
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
176
+ vision_pooler_output: Optional[torch.FloatTensor] = None
177
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
178
+ question_embeds: Optional[Tuple[torch.FloatTensor]] = None
179
+
180
+
181
+ @dataclass
182
+ class BlipOutput(ModelOutput):
183
+ """
184
+ Args:
185
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
186
+ Contrastive loss for image-text similarity.
187
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
188
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
189
+ similarity scores.
190
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
191
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
192
+ similarity scores.
193
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
194
+ The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
195
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
196
+ The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
197
+ text_model_output (`BaseModelOutputWithPooling`):
198
+ The output of the [`BlipTextModel`].
199
+ vision_model_output (`BaseModelOutputWithPooling`):
200
+ The output of the [`BlipVisionModel`].
201
+ """
202
+
203
+ loss: Optional[torch.FloatTensor] = None
204
+ logits_per_image: torch.FloatTensor = None
205
+ logits_per_text: torch.FloatTensor = None
206
+ text_embeds: torch.FloatTensor = None
207
+ image_embeds: torch.FloatTensor = None
208
+ text_model_output: BaseModelOutputWithPooling = None
209
+ vision_model_output: BaseModelOutputWithPooling = None
210
+
211
+ def to_tuple(self) -> Tuple[Any]:
212
+ return tuple(
213
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
214
+ for k in self.keys()
215
+ )
216
+
217
+
218
+ class BlipVisionEmbeddings(nn.Module):
219
+ def __init__(self, config: BlipVisionConfig):
220
+ super().__init__()
221
+ self.config = config
222
+ self.embed_dim = config.hidden_size
223
+ self.image_size = config.image_size
224
+ self.patch_size = config.patch_size
225
+
226
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
227
+
228
+ self.patch_embedding = nn.Conv2d(
229
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
230
+ )
231
+
232
+ self.num_patches = (self.image_size // self.patch_size) ** 2
233
+ self.num_positions = self.num_patches + 1
234
+
235
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
236
+
237
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
238
+ batch_size = pixel_values.shape[0]
239
+ target_dtype = self.patch_embedding.weight.dtype
240
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
241
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
242
+
243
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
244
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
245
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
246
+ return embeddings
247
+
248
+
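To make the tensor shapes in `BlipVisionEmbeddings.forward` concrete, here is a hedged shape walk-through; `image_size=384`, `patch_size=16` and `hidden_size=768` are assumed, typical base-model values rather than anything fixed by the code above:

```python
# Shape sketch for BlipVisionEmbeddings (configuration values are illustrative assumptions).
image_size, patch_size, hidden_size, batch_size = 384, 16, 768, 2

num_patches = (image_size // patch_size) ** 2  # 24 * 24 = 576 patch tokens
num_positions = num_patches + 1                # +1 for the learned class embedding

# pixel_values:                       (batch_size, 3, 384, 384)
# after the Conv2d patch embedding:   (batch_size, hidden_size, 24, 24)
# after flatten(2).transpose(1, 2):   (batch_size, 576, hidden_size)
# after prepending class_embedding:   (batch_size, 577, hidden_size)  == num_positions tokens
print(num_patches, num_positions)  # 576 577
```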
249
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Blip
250
+ class BlipTextEmbeddings(nn.Module):
251
+ def __init__(self, config: BlipTextConfig):
252
+ super().__init__()
253
+ embed_dim = config.hidden_size
254
+
255
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
256
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
257
+
258
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
259
+ self.register_buffer(
260
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
261
+ )
262
+
263
+ def forward(
264
+ self,
265
+ input_ids: Optional[torch.LongTensor] = None,
266
+ position_ids: Optional[torch.LongTensor] = None,
267
+ inputs_embeds: Optional[torch.FloatTensor] = None,
268
+ ) -> torch.Tensor:
269
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
270
+
271
+ if position_ids is None:
272
+ position_ids = self.position_ids[:, :seq_length]
273
+
274
+ if inputs_embeds is None:
275
+ inputs_embeds = self.token_embedding(input_ids)
276
+
277
+ position_embeddings = self.position_embedding(position_ids)
278
+ embeddings = inputs_embeds + position_embeddings
279
+
280
+ return embeddings
281
+
282
+
283
+ class BlipAttention(nn.Module):
284
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
285
+
286
+ def __init__(self, config):
287
+ super().__init__()
288
+ self.config = config
289
+ self.embed_dim = config.hidden_size
290
+ self.num_heads = config.num_attention_heads
291
+ self.head_dim = self.embed_dim // self.num_heads
292
+ if self.head_dim * self.num_heads != self.embed_dim:
293
+ raise ValueError(
294
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
295
+ f" {self.num_heads})."
296
+ )
297
+ self.scale = self.head_dim**-0.5
298
+ self.dropout = nn.Dropout(config.attention_dropout)
299
+
300
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim)
301
+
302
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
303
+
304
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
305
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ head_mask: Optional[torch.Tensor] = None,
311
+ output_attentions: Optional[bool] = False,
312
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
313
+ """Input shape: Batch x Time x Channel"""
314
+
315
+ bsz, tgt_len, embed_dim = hidden_states.size()
316
+
317
+ mixed_qkv = (
318
+ self.qkv(hidden_states)
319
+ .reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads)
320
+ .permute(2, 0, 3, 1, 4)
321
+ )
322
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
323
+
324
+ # Take the dot product between "query" and "key" to get the raw attention scores.
325
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
326
+
327
+ attention_scores = attention_scores * self.scale
328
+
329
+ # Normalize the attention scores to probabilities.
330
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
331
+
332
+ # This is actually dropping out entire tokens to attend to, which might
333
+ # seem a bit unusual, but is taken from the original Transformer paper.
334
+ attention_probs = self.dropout(attention_probs)
335
+
336
+ # Mask heads if we want to
337
+ if head_mask is not None:
338
+ attention_probs = attention_probs * head_mask
339
+
340
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
341
+
342
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
343
+ context_layer = context_layer.reshape(new_context_layer_shape)
344
+
345
+ output = self.projection(context_layer)
346
+
347
+ outputs = (output, attention_probs) if output_attentions else (output, None)
348
+
349
+ return outputs
350
+
351
+
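The fused `qkv` projection in `BlipAttention` packs queries, keys and values into a single linear layer and recovers them with a reshape and permute. A hedged, self-contained sketch of that split (all dimensions are illustrative assumptions):

```python
import torch
from torch import nn

bsz, tgt_len, embed_dim, num_heads = 2, 577, 768, 12
head_dim = embed_dim // num_heads

hidden_states = torch.randn(bsz, tgt_len, embed_dim)
qkv_proj = nn.Linear(embed_dim, 3 * embed_dim)

mixed_qkv = (
    qkv_proj(hidden_states)                          # (bsz, tgt_len, 3 * embed_dim)
    .reshape(bsz, tgt_len, 3, num_heads, head_dim)
    .permute(2, 0, 3, 1, 4)                          # (3, bsz, num_heads, tgt_len, head_dim)
)
query, key, value = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]

scores = torch.matmul(query, key.transpose(-1, -2)) * head_dim**-0.5
probs = scores.softmax(dim=-1)
context = torch.matmul(probs, value).permute(0, 2, 1, 3).reshape(bsz, tgt_len, embed_dim)
print(context.shape)  # torch.Size([2, 577, 768])
```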
352
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip
353
+ class BlipMLP(nn.Module):
354
+ def __init__(self, config):
355
+ super().__init__()
356
+ self.config = config
357
+ self.activation_fn = ACT2FN[config.hidden_act]
358
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
359
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
360
+
361
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
362
+ hidden_states = self.fc1(hidden_states)
363
+ hidden_states = self.activation_fn(hidden_states)
364
+ hidden_states = self.fc2(hidden_states)
365
+ return hidden_states
366
+
367
+
368
+ class BlipEncoderLayer(nn.Module):
369
+ def __init__(self, config: BlipConfig):
370
+ super().__init__()
371
+ self.embed_dim = config.hidden_size
372
+ self.self_attn = BlipAttention(config)
373
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
374
+ self.mlp = BlipMLP(config)
375
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
376
+
377
+ def forward(
378
+ self,
379
+ hidden_states: torch.Tensor,
380
+ attention_mask: torch.Tensor,
381
+ output_attentions: Optional[bool] = False,
382
+ ) -> Tuple[torch.FloatTensor]:
383
+ """
384
+ Args:
385
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
386
+ attention_mask (`torch.FloatTensor`): attention mask of size
387
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
388
+ `(config.encoder_attention_heads,)`.
389
+ output_attentions (`bool`, *optional*):
390
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
391
+ returned tensors for more detail.
392
+ """
393
+ residual = hidden_states
394
+
395
+ hidden_states = self.layer_norm1(hidden_states)
396
+ hidden_states, attn_weights = self.self_attn(
397
+ hidden_states=hidden_states,
398
+ head_mask=attention_mask,
399
+ output_attentions=output_attentions,
400
+ )
401
+ hidden_states = hidden_states + residual
402
+ residual = hidden_states
403
+ hidden_states = self.layer_norm2(hidden_states)
404
+ hidden_states = self.mlp(hidden_states)
405
+
406
+ hidden_states = hidden_states + residual
407
+
408
+ outputs = (hidden_states,)
409
+
410
+ if output_attentions:
411
+ outputs += (attn_weights,)
412
+
413
+ return outputs
414
+
415
+
416
+ class BlipPreTrainedModel(PreTrainedModel):
417
+ """
418
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
419
+ models.
420
+ """
421
+
422
+ config_class = BlipConfig
423
+ base_model_prefix = "blip"
424
+ supports_gradient_checkpointing = True
425
+
426
+ def _init_weights(self, module):
427
+ """Initialize the weights"""
428
+ factor = self.config.initializer_range
429
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
430
+ module.weight.data.normal_(mean=0.0, std=factor)
431
+ if hasattr(module, "bias") and module.bias is not None:
432
+ module.bias.data.zero_()
433
+
434
+ if isinstance(module, BlipVisionEmbeddings):
435
+ if hasattr(self.config, "vision_config"):
436
+ factor = self.config.vision_config.initializer_range
437
+ nn.init.trunc_normal_(
438
+ module.position_embedding,
439
+ mean=0.0,
440
+ std=factor,
441
+ )
442
+
443
+ nn.init.trunc_normal_(
444
+ module.class_embedding,
445
+ mean=0.0,
446
+ std=factor,
447
+ )
448
+
449
+ elif isinstance(module, nn.LayerNorm):
450
+ module.bias.data.zero_()
451
+ module.weight.data.fill_(1.0)
452
+ elif isinstance(module, nn.Linear) and module.bias is not None:
453
+ module.bias.data.zero_()
454
+
455
+
456
+ BLIP_START_DOCSTRING = r"""
457
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
458
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
459
+ etc.)
460
+
461
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
462
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
463
+ and behavior.
464
+
465
+ Parameters:
466
+ config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
467
+ Initializing with a config file does not load the weights associated with the model, only the
468
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
469
+ """
470
+
471
+ BLIP_TEXT_INPUTS_DOCSTRING = r"""
472
+ Args:
473
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
474
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
475
+ it.
476
+
477
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
478
+
479
+ [What are input IDs?](../glossary#input-ids)
480
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
481
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
482
+
483
+ - 1 for tokens that are **not masked**,
484
+ - 0 for tokens that are **masked**.
485
+
486
+ [What are attention masks?](../glossary#attention-mask)
487
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
488
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
489
+ config.max_position_embeddings - 1]`.
490
+
491
+ [What are position IDs?](../glossary#position-ids)
492
+ output_attentions (`bool`, *optional*):
493
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
494
+ tensors for more detail.
495
+ output_hidden_states (`bool`, *optional*):
496
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
497
+ more detail.
498
+ return_dict (`bool`, *optional*):
499
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
500
+ """
501
+
502
+ BLIP_VISION_INPUTS_DOCSTRING = r"""
503
+ Args:
504
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
505
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
506
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
507
+ output_attentions (`bool`, *optional*):
508
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
509
+ tensors for more detail.
510
+ output_hidden_states (`bool`, *optional*):
511
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
512
+ more detail.
513
+ return_dict (`bool`, *optional*):
514
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
515
+ """
516
+
517
+ BLIP_INPUTS_DOCSTRING = r"""
518
+ Args:
519
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
520
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
521
+ it.
522
+
523
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
524
+
525
+ [What are input IDs?](../glossary#input-ids)
526
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
527
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
528
+
529
+ - 1 for tokens that are **not masked**,
530
+ - 0 for tokens that are **masked**.
531
+
532
+ [What are attention masks?](../glossary#attention-mask)
533
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
534
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
535
+ config.max_position_embeddings - 1]`.
536
+
537
+ [What are position IDs?](../glossary#position-ids)
538
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
539
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
540
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
541
+ return_loss (`bool`, *optional*):
542
+ Whether or not to return the contrastive loss.
543
+ output_attentions (`bool`, *optional*):
544
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
545
+ tensors for more detail.
546
+ output_hidden_states (`bool`, *optional*):
547
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
548
+ more detail.
549
+ return_dict (`bool`, *optional*):
550
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
551
+ """
552
+
553
+
554
+ class BlipEncoder(nn.Module):
555
+ """
556
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
557
+ [`BlipEncoderLayer`].
558
+
559
+ Args:
560
+ config (`BlipConfig`):
561
+ The corresponding vision configuration for the `BlipEncoder`.
562
+ """
563
+
564
+ def __init__(self, config: BlipConfig):
565
+ super().__init__()
566
+ self.config = config
567
+ self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
568
+ self.gradient_checkpointing = False
569
+
570
+ def forward(
571
+ self,
572
+ inputs_embeds,
573
+ attention_mask: Optional[torch.Tensor] = None,
574
+ output_attentions: Optional[bool] = None,
575
+ output_hidden_states: Optional[bool] = None,
576
+ return_dict: Optional[bool] = None,
577
+ ) -> Union[Tuple, BaseModelOutput]:
578
+ r"""
579
+ Args:
580
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
581
+ Embedded representation of the inputs. Should be float, not int tokens.
582
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
583
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
584
+
585
+ - 1 for tokens that are **not masked**,
586
+ - 0 for tokens that are **masked**.
587
+
588
+ [What are attention masks?](../glossary#attention-mask)
589
+ output_attentions (`bool`, *optional*):
590
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
591
+ returned tensors for more detail.
592
+ output_hidden_states (`bool`, *optional*):
593
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
594
+ for more detail.
595
+ return_dict (`bool`, *optional*):
596
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
597
+ """
598
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
599
+ output_hidden_states = (
600
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
601
+ )
602
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
603
+
604
+ encoder_states = () if output_hidden_states else None
605
+ all_attentions = () if output_attentions else None
606
+
607
+ hidden_states = inputs_embeds
608
+ for idx, encoder_layer in enumerate(self.layers):
609
+ if output_hidden_states:
610
+ encoder_states = encoder_states + (hidden_states,)
611
+ if self.gradient_checkpointing and self.training:
612
+ layer_outputs = self._gradient_checkpointing_func(
613
+ encoder_layer.__call__,
614
+ hidden_states,
615
+ attention_mask,
616
+ output_attentions,
617
+ )
618
+ else:
619
+ layer_outputs = encoder_layer(
620
+ hidden_states,
621
+ attention_mask,
622
+ output_attentions=output_attentions,
623
+ )
624
+
625
+ hidden_states = layer_outputs[0]
626
+
627
+ if output_attentions:
628
+ all_attentions = all_attentions + (layer_outputs[1],)
629
+
630
+ if output_hidden_states:
631
+ encoder_states = encoder_states + (hidden_states,)
632
+
633
+ if not return_dict:
634
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
635
+ return BaseModelOutput(
636
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
637
+ )
638
+
639
+
640
+ class BlipVisionModel(BlipPreTrainedModel):
641
+ main_input_name = "pixel_values"
642
+ config_class = BlipVisionConfig
643
+
644
+ def __init__(self, config: BlipVisionConfig):
645
+ super().__init__(config)
646
+ self.config = config
647
+ embed_dim = config.hidden_size
648
+
649
+ self.embeddings = BlipVisionEmbeddings(config)
650
+ self.encoder = BlipEncoder(config)
651
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
652
+
653
+ self.post_init()
654
+
655
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
656
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=BlipVisionConfig)
657
+ def forward(
658
+ self,
659
+ pixel_values: Optional[torch.FloatTensor] = None,
660
+ output_attentions: Optional[bool] = None,
661
+ output_hidden_states: Optional[bool] = None,
662
+ return_dict: Optional[bool] = None,
663
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
664
+ r"""
665
+ Returns:
666
+
667
+ """
668
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
669
+ output_hidden_states = (
670
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
671
+ )
672
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
673
+
674
+ if pixel_values is None:
675
+ raise ValueError("You have to specify pixel_values")
676
+
677
+ hidden_states = self.embeddings(pixel_values)
678
+
679
+ encoder_outputs = self.encoder(
680
+ inputs_embeds=hidden_states,
681
+ output_attentions=output_attentions,
682
+ output_hidden_states=output_hidden_states,
683
+ return_dict=return_dict,
684
+ )
685
+
686
+ last_hidden_state = encoder_outputs[0]
687
+ last_hidden_state = self.post_layernorm(last_hidden_state)
688
+
689
+ pooled_output = last_hidden_state[:, 0, :]
690
+ pooled_output = self.post_layernorm(pooled_output)
691
+
692
+ if not return_dict:
693
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
694
+
695
+ return BaseModelOutputWithPooling(
696
+ last_hidden_state=last_hidden_state,
697
+ pooler_output=pooled_output,
698
+ hidden_states=encoder_outputs.hidden_states,
699
+ attentions=encoder_outputs.attentions,
700
+ )
701
+
702
+ def get_input_embeddings(self):
703
+ return self.embeddings
704
+
705
+
706
+ @add_start_docstrings(BLIP_START_DOCSTRING)
707
+ class BlipModel(BlipPreTrainedModel):
708
+ config_class = BlipConfig
709
+
710
+ def __init__(self, config: BlipConfig):
711
+ super().__init__(config)
712
+
713
+ if not isinstance(config.text_config, BlipTextConfig):
714
+ raise ValueError(
715
+ "config.text_config is expected to be of type BlipTextConfig but is of type"
716
+ f" {type(config.text_config)}."
717
+ )
718
+
719
+ if not isinstance(config.vision_config, BlipVisionConfig):
720
+ raise ValueError(
721
+ "config.vision_config is expected to be of type BlipVisionConfig but is of type"
722
+ f" {type(config.vision_config)}."
723
+ )
724
+
725
+ text_config = config.text_config
726
+ vision_config = config.vision_config
727
+
728
+ self.projection_dim = config.projection_dim
729
+ self.text_embed_dim = text_config.hidden_size
730
+ self.vision_embed_dim = vision_config.hidden_size
731
+
732
+ self.text_model = BlipTextModel(text_config)
733
+ self.vision_model = BlipVisionModel(vision_config)
734
+
735
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
736
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
737
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
738
+
739
+ # Initialize weights and apply final processing
740
+ self.post_init()
741
+
742
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
743
+ def get_text_features(
744
+ self,
745
+ input_ids: Optional[torch.Tensor] = None,
746
+ attention_mask: Optional[torch.Tensor] = None,
747
+ position_ids: Optional[torch.Tensor] = None,
748
+ return_dict: Optional[bool] = None,
749
+ ) -> torch.FloatTensor:
750
+ r"""
751
+ Returns:
752
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
753
+ applying the projection layer to the pooled output of [`BlipTextModel`].
754
+
755
+ Examples:
756
+
757
+ ```python
758
+ >>> from transformers import AutoProcessor, BlipModel
759
+
760
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
761
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
762
+
763
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
764
+ >>> text_features = model.get_text_features(**inputs)
765
+ ```"""
766
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
767
+
768
+ text_outputs = self.text_model(
769
+ input_ids=input_ids,
770
+ attention_mask=attention_mask,
771
+ position_ids=position_ids,
772
+ return_dict=return_dict,
773
+ )
774
+
775
+ pooled_output = text_outputs[1]
776
+ text_features = self.text_projection(pooled_output)
777
+
778
+ return text_features
779
+
780
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
781
+ def get_image_features(
782
+ self,
783
+ pixel_values: Optional[torch.FloatTensor] = None,
784
+ return_dict: Optional[bool] = None,
785
+ ) -> torch.FloatTensor:
786
+ r"""
787
+ Returns:
788
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
789
+ applying the projection layer to the pooled output of [`BlipVisionModel`].
790
+
791
+ Examples:
792
+
793
+ ```python
794
+ >>> from PIL import Image
795
+ >>> import requests
796
+ >>> from transformers import AutoProcessor, BlipModel
797
+
798
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
799
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
800
+
801
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
802
+ >>> image = Image.open(requests.get(url, stream=True).raw)
803
+
804
+ >>> inputs = processor(images=image, return_tensors="pt")
805
+
806
+ >>> image_features = model.get_image_features(**inputs)
807
+ ```"""
808
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
809
+
810
+ vision_outputs = self.vision_model(pixel_values=pixel_values, return_dict=return_dict)
811
+
812
+ pooled_output = vision_outputs[1] # pooled_output
813
+ image_features = self.visual_projection(pooled_output)
814
+
815
+ return image_features
816
+
817
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
818
+ @replace_return_docstrings(output_type=BlipOutput, config_class=BlipConfig)
819
+ def forward(
820
+ self,
821
+ input_ids: Optional[torch.LongTensor] = None,
822
+ pixel_values: Optional[torch.FloatTensor] = None,
823
+ attention_mask: Optional[torch.Tensor] = None,
824
+ position_ids: Optional[torch.LongTensor] = None,
825
+ return_loss: Optional[bool] = None,
826
+ output_attentions: Optional[bool] = None,
827
+ output_hidden_states: Optional[bool] = None,
828
+ return_dict: Optional[bool] = None,
829
+ ) -> Union[Tuple, BlipOutput]:
830
+ r"""
831
+ Returns:
832
+
833
+ Examples:
834
+
835
+ ```python
836
+ >>> from PIL import Image
837
+ >>> import requests
838
+ >>> from transformers import AutoProcessor, BlipModel
839
+
840
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
841
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
842
+
843
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
844
+ >>> image = Image.open(requests.get(url, stream=True).raw)
845
+
846
+ >>> inputs = processor(
847
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
848
+ ... )
849
+
850
+ >>> outputs = model(**inputs)
851
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
852
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
853
+ ```"""
854
+ # Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
858
+ )
859
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
860
+
861
+ vision_outputs = self.vision_model(
862
+ pixel_values=pixel_values,
863
+ output_attentions=output_attentions,
864
+ output_hidden_states=output_hidden_states,
865
+ return_dict=return_dict,
866
+ )
867
+
868
+ text_outputs = self.text_model(
869
+ input_ids=input_ids,
870
+ attention_mask=attention_mask,
871
+ position_ids=position_ids,
872
+ output_attentions=output_attentions,
873
+ output_hidden_states=output_hidden_states,
874
+ return_dict=return_dict,
875
+ )
876
+
877
+ image_embeds = vision_outputs[1]
878
+ image_embeds = self.visual_projection(image_embeds)
879
+
880
+ text_embeds = text_outputs[1]
881
+ text_embeds = self.text_projection(text_embeds)
882
+
883
+ # normalized features
884
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
885
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
886
+
887
+ # cosine similarity as logits
888
+ logit_scale = self.logit_scale.exp()
889
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
890
+ logits_per_image = logits_per_text.t()
891
+
892
+ loss = None
893
+ if return_loss:
894
+ loss = blip_loss(logits_per_text)
895
+
896
+ if not return_dict:
897
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
898
+ return ((loss,) + output) if loss is not None else output
899
+
900
+ return BlipOutput(
901
+ loss=loss,
902
+ logits_per_image=logits_per_image,
903
+ logits_per_text=logits_per_text,
904
+ text_embeds=text_embeds,
905
+ image_embeds=image_embeds,
906
+ text_model_output=text_outputs,
907
+ vision_model_output=vision_outputs,
908
+ )
909
+
910
+
911
+ @add_start_docstrings(
912
+ """
913
+ BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
914
+ `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. Otherwise,
915
+ the decoder starts generating the caption from the [BOS] (beginning-of-sequence) token alone, without any
916
+ text prompt.
917
+ """,
918
+ BLIP_START_DOCSTRING,
919
+ )
920
+ class BlipForConditionalGeneration(BlipPreTrainedModel):
921
+ config_class = BlipConfig
922
+ _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"]
923
+ main_input_name = "pixel_values"
924
+
925
+ def __init__(self, config: BlipConfig):
926
+ super().__init__(config)
927
+
928
+ self.vision_model = BlipVisionModel(config.vision_config)
929
+
930
+ self.text_decoder = BlipTextLMHeadModel(config.text_config)
931
+
932
+ self.decoder_input_ids = config.text_config.bos_token_id
933
+ self.decoder_pad_token_id = config.text_config.pad_token_id
934
+
935
+ # Initialize weights and apply final processing
936
+ self.post_init()
937
+
938
+ def get_input_embeddings(self) -> nn.Module:
939
+ return self.vision_model.embeddings.patch_embedding
940
+
941
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
942
+ @replace_return_docstrings(output_type=BlipForConditionalGenerationModelOutput, config_class=BlipVisionConfig)
943
+ def forward(
944
+ self,
945
+ pixel_values: torch.FloatTensor,
946
+ input_ids: Optional[torch.LongTensor] = None,
947
+ attention_mask: Optional[torch.LongTensor] = None,
948
+ output_attentions: Optional[bool] = None,
949
+ output_hidden_states: Optional[bool] = None,
950
+ labels: Optional[torch.LongTensor] = None,
951
+ return_dict: Optional[bool] = None,
952
+ ) -> Union[Tuple, BlipForConditionalGenerationModelOutput]:
953
+ r"""
954
+ Returns:
955
+
956
+ Examples:
957
+
958
+ ```python
959
+ >>> from PIL import Image
960
+ >>> import requests
961
+ >>> from transformers import AutoProcessor, BlipForConditionalGeneration
962
+
963
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
964
+ >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
965
+
966
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
967
+ >>> image = Image.open(requests.get(url, stream=True).raw)
968
+ >>> text = "A picture of"
969
+
970
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
971
+
972
+ >>> outputs = model(**inputs)
973
+ ```"""
974
+
975
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
976
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
977
+ output_hidden_states = (
978
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
979
+ )
980
+
981
+ vision_outputs = self.vision_model(
982
+ pixel_values=pixel_values,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+
988
+ image_embeds = vision_outputs[0]
989
+
990
+ outputs = self.text_decoder(
991
+ input_ids=input_ids,
992
+ attention_mask=attention_mask,
993
+ encoder_hidden_states=image_embeds,
994
+ labels=labels,
995
+ return_dict=return_dict,
996
+ reduction="mean",
997
+ )
998
+
999
+ if not return_dict:
1000
+ outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]
1001
+ return tuple(output for output in outputs if output is not None)
1002
+
1003
+ return BlipForConditionalGenerationModelOutput(
1004
+ loss=outputs.loss,
1005
+ logits=outputs.logits,
1006
+ image_embeds=image_embeds,
1007
+ last_hidden_state=vision_outputs.last_hidden_state,
1008
+ hidden_states=vision_outputs.hidden_states,
1009
+ attentions=vision_outputs.attentions,
1010
+ )
1011
+
1012
+ @torch.no_grad()
1013
+ def generate(
1014
+ self,
1015
+ pixel_values: torch.FloatTensor,
1016
+ input_ids: Optional[torch.LongTensor] = None,
1017
+ attention_mask: Optional[torch.LongTensor] = None,
1018
+ **generate_kwargs,
1019
+ ) -> torch.LongTensor:
1020
+ r"""
1021
+ Overrides the *generate* function to be able to use the model as a conditional generator.
1022
+
1023
+ Parameters:
1024
+ pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
1025
+ Input image to be processed
1026
+ input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
1027
+ The sequence used as a prompt for the generation.
1028
+ attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
1029
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1030
+
1031
+
1032
+ Examples:
1033
+ ```python
1034
+ >>> from PIL import Image
1035
+ >>> import requests
1036
+ >>> from transformers import AutoProcessor, BlipForConditionalGeneration
1037
+
1038
+ >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
1039
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1040
+
1041
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1042
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1043
+
1044
+ >>> inputs = processor(images=image, return_tensors="pt")
1045
+
1046
+ >>> outputs = model.generate(**inputs)
1047
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1048
+ two cats sleeping on a couch
1049
+ ```
1050
+ """
1051
+
1052
+ batch_size = pixel_values.shape[0]
1053
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1054
+
1055
+ image_embeds = vision_outputs[0]
1056
+
1057
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
1058
+
1059
+ if isinstance(input_ids, list):
1060
+ input_ids = torch.LongTensor(input_ids)
1061
+ elif input_ids is None:
1062
+ input_ids = (
1063
+ torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]])
1064
+ .repeat(batch_size, 1)
1065
+ .to(image_embeds.device)
1066
+ )
1067
+
1068
+ input_ids[:, 0] = self.config.text_config.bos_token_id
1069
+ attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
1070
+
1071
+ outputs = self.text_decoder.generate(
1072
+ input_ids=input_ids[:, :-1],
1073
+ eos_token_id=self.config.text_config.sep_token_id,
1074
+ pad_token_id=self.config.text_config.pad_token_id,
1075
+ attention_mask=attention_mask,
1076
+ encoder_hidden_states=image_embeds,
1077
+ encoder_attention_mask=image_attention_mask,
1078
+ **generate_kwargs,
1079
+ )
1080
+
1081
+ return outputs
1082
+
1083
+
1084
+ @add_start_docstrings(
1085
+ """
1086
+ BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
1087
+ decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
1088
+ with the encoding of the image, and the text decoder will output the answer to the question.
1089
+ """,
1090
+ BLIP_START_DOCSTRING,
1091
+ )
1092
+ class BlipForQuestionAnswering(BlipPreTrainedModel):
1093
+ config_class = BlipConfig
1094
+ _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"]
1095
+
1096
+ def __init__(self, config: BlipConfig):
1097
+ super().__init__(config)
1098
+
1099
+ self.vision_model = BlipVisionModel(config.vision_config)
1100
+
1101
+ self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
1102
+
1103
+ self.text_decoder = BlipTextLMHeadModel(config.text_config)
1104
+
1105
+ self.decoder_pad_token_id = config.text_config.pad_token_id
1106
+ self.decoder_start_token_id = config.text_config.bos_token_id
1107
+
1108
+ # Initialize weights and apply final processing
1109
+ self.post_init()
1110
+
1111
+ def get_input_embeddings(self) -> nn.Module:
1112
+ return self.vision_model.embeddings.patch_embedding
1113
+
1114
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1115
+ @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
1116
+ def forward(
1117
+ self,
1118
+ input_ids: torch.LongTensor,
1119
+ pixel_values: torch.FloatTensor,
1120
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1121
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1122
+ attention_mask: Optional[torch.LongTensor] = None,
1123
+ output_attentions: Optional[bool] = None,
1124
+ output_hidden_states: Optional[bool] = None,
1125
+ labels: Optional[torch.LongTensor] = None,
1126
+ return_dict: Optional[bool] = None,
1127
+ ) -> Union[Tuple, BlipTextVisionModelOutput]:
1128
+ r"""
1129
+ Returns:
1130
+
1131
+ Examples:
1132
+
1133
+ ```python
1134
+ >>> from PIL import Image
1135
+ >>> import requests
1136
+ >>> from transformers import AutoProcessor, BlipForQuestionAnswering
1137
+
1138
+ >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1139
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1140
+
1141
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1142
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1143
+
1144
+ >>> # training
1145
+ >>> text = "How many cats are in the picture?"
1146
+ >>> label = "2"
1147
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1148
+ >>> labels = processor(text=label, return_tensors="pt").input_ids
1149
+
1150
+ >>> inputs["labels"] = labels
1151
+ >>> outputs = model(**inputs)
1152
+ >>> loss = outputs.loss
1153
+ >>> loss.backward()
1154
+
1155
+ >>> # inference
1156
+ >>> text = "How many cats are in the picture?"
1157
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1158
+ >>> outputs = model.generate(**inputs)
1159
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1160
+ 2
1161
+ ```"""
1162
+ if labels is None and decoder_input_ids is None:
1163
+ raise ValueError(
1164
+ "Either `decoder_input_ids` or `labels` should be passed when calling `forward` with"
1165
+ " `BlipForQuestionAnswering`. if you are training the model make sure that `labels` is passed, if you"
1166
+ " `BlipForQuestionAnswering`. If you are training the model, make sure that `labels` is passed; if you"
1167
+ " are using the model for inference, make sure that `decoder_input_ids` is passed or call `generate`"
1168
+
1169
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1170
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1171
+ output_hidden_states = (
1172
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1173
+ )
1174
+
1175
+ vision_outputs = self.vision_model(
1176
+ pixel_values=pixel_values,
1177
+ output_attentions=output_attentions,
1178
+ output_hidden_states=output_hidden_states,
1179
+ return_dict=return_dict,
1180
+ )
1181
+
1182
+ image_embeds = vision_outputs[0]
1183
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
1184
+
1185
+ question_embeds = self.text_encoder(
1186
+ input_ids=input_ids,
1187
+ attention_mask=attention_mask,
1188
+ encoder_hidden_states=image_embeds,
1189
+ encoder_attention_mask=image_attention_mask,
1190
+ return_dict=return_dict,
1191
+ )
1192
+
1193
+ if labels is not None and decoder_input_ids is None:
1194
+ # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153
1195
+ decoder_input_ids = labels
1196
+
1197
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1198
+
1199
+ answer_output = self.text_decoder(
1200
+ input_ids=decoder_input_ids,
1201
+ attention_mask=decoder_attention_mask,
1202
+ encoder_hidden_states=question_embeds,
1203
+ encoder_attention_mask=attention_mask,
1204
+ labels=labels,
1205
+ return_dict=return_dict,
1206
+ reduction="mean",
1207
+ )
1208
+
1209
+ if labels is not None:
1210
+ decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean()
1211
+ else:
1212
+ decoder_loss = None
1213
+
1214
+ if not return_dict:
1215
+ outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
1216
+ return tuple(output for output in outputs if output is not None)
1217
+
1218
+ return BlipTextVisionModelOutput(
1219
+ loss=decoder_loss,
1220
+ image_embeds=image_embeds,
1221
+ last_hidden_state=vision_outputs.last_hidden_state,
1222
+ hidden_states=vision_outputs.hidden_states,
1223
+ attentions=vision_outputs.attentions,
1224
+ )
1225
+
1226
+ @torch.no_grad()
1227
+ def generate(
1228
+ self,
1229
+ input_ids: torch.LongTensor,
1230
+ pixel_values: torch.FloatTensor,
1231
+ attention_mask: Optional[torch.LongTensor] = None,
1232
+ **generate_kwargs,
1233
+ ) -> torch.LongTensor:
1234
+ r"""
1235
+ Overrides *generate* function to be able to use the model as a conditional generator
1236
+
1237
+ Parameters:
1238
+ input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*):
1239
+ The sequence used as a prompt for the generation.
1240
+ pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
1241
+ Input image to be processed
1242
+ attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
1243
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
1244
+ tokens that are NOT MASKED, `0` for MASKED tokens.
1245
+ **generate_kwargs:
1246
+ Additional arguments passed to the *generate* function of the decoder
1247
+
1248
+
1249
+ Examples:
1250
+ ```python
1251
+ >>> from PIL import Image
1252
+ >>> import requests
1253
+ >>> from transformers import AutoProcessor, BlipForQuestionAnswering
1254
+
1255
+ >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1256
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1257
+
1258
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1259
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1260
+ >>> text = "How many cats are in the picture?"
1261
+
1262
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1263
+
1264
+ >>> outputs = model.generate(**inputs)
1265
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1266
+ 2
1267
+ ```
1268
+ """
1269
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1270
+
1271
+ image_embeds = vision_outputs[0]
1272
+
1273
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
1274
+
1275
+ if isinstance(input_ids, list):
1276
+ input_ids = torch.LongTensor(input_ids)
1277
+
1278
+ question_outputs = self.text_encoder(
1279
+ input_ids=input_ids,
1280
+ attention_mask=attention_mask,
1281
+ encoder_hidden_states=image_embeds,
1282
+ encoder_attention_mask=image_attention_mask,
1283
+ return_dict=False,
1284
+ )
1285
+
1286
+ question_embeds = question_outputs[0]
1287
+
1288
+ question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long).to(question_embeds.device)
1289
+
1290
+ bos_ids = torch.full(
1291
+ (question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device
1292
+ )
1293
+
1294
+ outputs = self.text_decoder.generate(
1295
+ input_ids=bos_ids,
1296
+ eos_token_id=self.config.text_config.sep_token_id,
1297
+ pad_token_id=self.config.text_config.pad_token_id,
1298
+ encoder_hidden_states=question_embeds,
1299
+ encoder_attention_mask=question_attention_mask,
1300
+ **generate_kwargs,
1301
+ )
1302
+
1303
+ return outputs
1304
+
1305
+
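`BlipForQuestionAnswering.generate` above first cross-encodes the question with the image features and only then seeds the answer decoder with `decoder_start_token_id`, so it also works on batches. A minimal batched sketch using the same `Salesforce/blip-vqa-base` checkpoint as the docstring; the image files are hypothetical:

```python
# Batched VQA sketch: two image/question pairs answered in a single generate() call.
from PIL import Image
from transformers import AutoProcessor, BlipForQuestionAnswering

processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

images = [Image.open("cats.jpg"), Image.open("dog.jpg")]  # hypothetical local files
questions = ["How many cats are in the picture?", "What animal is this?"]

inputs = processor(images=images, text=questions, padding=True, return_tensors="pt")
outputs = model.generate(**inputs)
print([processor.decode(o, skip_special_tokens=True) for o in outputs])
```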
1306
+ @add_start_docstrings(
1307
+ """
1308
+ BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
1309
+ image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
1310
+ the image.
1311
+ """,
1312
+ BLIP_START_DOCSTRING,
1313
+ )
1314
+ class BlipForImageTextRetrieval(BlipPreTrainedModel):
1315
+ config_class = BlipConfig
1316
+
1317
+ def __init__(self, config: BlipConfig):
1318
+ super().__init__(config)
1319
+
1320
+ self.vision_model = BlipVisionModel(config.vision_config)
1321
+
1322
+ self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
1323
+
1324
+ # vision projection layer
1325
+ self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size)
1326
+
1327
+ # text projection layer
1328
+ self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size)
1329
+
1330
+ # image text matching head
1331
+ self.itm_head = nn.Linear(config.text_config.hidden_size, 2)
1332
+
1333
+ self.decoder_pad_token_id = (
1334
+ config.text_config.pad_token_id
1335
+ if not hasattr(config, "decoder_pad_token_id")
1336
+ else config.decoder_pad_token_id
1337
+ )
1338
+ self.decoder_start_token_id = (
1339
+ config.text_config.bos_token_id
1340
+ if not hasattr(config, "decoder_start_token_id")
1341
+ else config.decoder_start_token_id
1342
+ )
1343
+
1344
+ # Initialize weights and apply final processing
1345
+ self.post_init()
1346
+
1347
+ def get_input_embeddings(self) -> nn.Module:
1348
+ return self.vision_model.embeddings.patch_embedding
1349
+
1350
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1351
+ @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
1352
+ def forward(
1353
+ self,
1354
+ input_ids: torch.LongTensor,
1355
+ pixel_values: torch.FloatTensor,
1356
+ use_itm_head: Optional[bool] = True,
1357
+ attention_mask: Optional[torch.LongTensor] = None,
1358
+ output_attentions: Optional[bool] = None,
1359
+ output_hidden_states: Optional[bool] = None,
1360
+ return_dict: Optional[bool] = None,
1361
+ ) -> Union[Tuple, BlipTextVisionModelOutput]:
1362
+ r"""
1363
+ Returns:
1364
+
1365
+ Examples:
1366
+
1367
+ ```python
1368
+ >>> from PIL import Image
1369
+ >>> import requests
1370
+ >>> from transformers import AutoProcessor, BlipForImageTextRetrieval
1371
+
1372
+ >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
1373
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
1374
+
1375
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1376
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1377
+ >>> text = "an image of a cat"
1378
+
1379
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1380
+ >>> outputs = model(**inputs)
1381
+ ```
1382
+ """
1383
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1384
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1385
+ output_hidden_states = (
1386
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1387
+ )
1388
+
1389
+ vision_outputs = self.vision_model(
1390
+ pixel_values=pixel_values,
1391
+ output_attentions=output_attentions,
1392
+ output_hidden_states=output_hidden_states,
1393
+ return_dict=return_dict,
1394
+ )
1395
+
1396
+ image_embeds = vision_outputs[0]
1397
+ image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
1398
+
1399
+ if use_itm_head:
1400
+ question_embeds = self.text_encoder(
1401
+ input_ids=input_ids,
1402
+ attention_mask=attention_mask,
1403
+ encoder_hidden_states=image_embeds,
1404
+ encoder_attention_mask=image_atts,
1405
+ return_dict=return_dict,
1406
+ )
1407
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1408
+
1409
+ output = self.itm_head(question_embeds[:, 0, :])
1410
+ else:
1411
+ question_embeds = self.text_encoder(
1412
+ input_ids=input_ids,
1413
+ attention_mask=attention_mask,
1414
+ return_dict=return_dict,
1415
+ )
1416
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1417
+
1418
+ image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
1419
+ text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1)
1420
+
1421
+ output = image_feat @ text_feat.t()
1422
+
1423
+ if not return_dict:
1424
+ outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
1425
+ return tuple(output for output in outputs if output is not None)
1426
+
1427
+ return BlipImageTextMatchingModelOutput(
1428
+ itm_score=output,
1429
+ last_hidden_state=vision_outputs.last_hidden_state,
1430
+ hidden_states=vision_outputs.hidden_states,
1431
+ attentions=vision_outputs.attentions,
1432
+ question_embeds=question_embeds,
1433
+ )
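In `BlipForImageTextRetrieval.forward`, `use_itm_head=True` returns two-way image-text matching logits computed from the cross-attended [CLS] state, while `use_itm_head=False` returns the cosine similarity of the projected unimodal image and text features. A minimal sketch of both paths, using the `Salesforce/blip-itm-base-coco` checkpoint from the docstring; the image file and the assumption that index 1 of the ITM logits is the "match" class are mine:

```python
import torch
from PIL import Image
from transformers import AutoProcessor, BlipForImageTextRetrieval

processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")

image = Image.open("cats.jpg")  # hypothetical local file
inputs = processor(images=image, text="two cats sleeping on a couch", return_tensors="pt")

with torch.no_grad():
    # ITM head: 2-way logits; index 1 is taken here as the "match" class.
    itm_logits = model(**inputs, use_itm_head=True).itm_score
    match_prob = torch.softmax(itm_logits, dim=-1)[:, 1]

    # Contrastive path: cosine similarity of projected image/text features.
    cosine_score = model(**inputs, use_itm_head=False).itm_score

print(match_prob.item(), cosine_score.item())
```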
venv/lib/python3.10/site-packages/transformers/models/blip/modeling_blip_text.py ADDED
@@ -0,0 +1,945 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the BSD-3-clause license (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # https://opensource.org/licenses/BSD-3-Clause
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import Tensor, device, nn
23
+ from torch.nn import CrossEntropyLoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ CausalLMOutputWithCrossAttentions,
30
+ )
31
+ from ...modeling_utils import (
32
+ PreTrainedModel,
33
+ apply_chunking_to_forward,
34
+ find_pruneable_heads_and_indices,
35
+ prune_linear_layer,
36
+ )
37
+ from ...utils import logging
38
+ from .configuration_blip import BlipTextConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L52
45
+ class BlipTextEmbeddings(nn.Module):
46
+ """Construct the embeddings from word and position embeddings."""
47
+
48
+ def __init__(self, config):
49
+ super().__init__()
50
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
51
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
52
+
53
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
54
+ # any TensorFlow checkpoint file
55
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
56
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
57
+
58
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
59
+ self.register_buffer(
60
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
61
+ )
62
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
63
+
64
+ self.config = config
65
+
66
+ def forward(
67
+ self,
68
+ input_ids: Optional[torch.LongTensor] = None,
69
+ position_ids: Optional[torch.LongTensor] = None,
70
+ inputs_embeds: Optional[torch.FloatTensor] = None,
71
+ past_key_values_length: int = 0,
72
+ ) -> torch.Tensor:
73
+ if input_ids is not None:
74
+ input_shape = input_ids.size()
75
+ else:
76
+ input_shape = inputs_embeds.size()[:-1]
77
+
78
+ seq_length = input_shape[1]
79
+
80
+ if position_ids is None:
81
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
82
+
83
+ if inputs_embeds is None:
84
+ input_ids = input_ids.to(self.word_embeddings.weight.device)
85
+ inputs_embeds = self.word_embeddings(input_ids)
86
+
87
+ embeddings = inputs_embeds
88
+
89
+ if self.position_embedding_type == "absolute":
90
+ position_embeddings = self.position_embeddings(position_ids)
91
+ embeddings += position_embeddings
92
+ embeddings = self.LayerNorm(embeddings)
93
+ embeddings = self.dropout(embeddings)
94
+ return embeddings
95
+
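The embeddings use absolute positions; when cached key/value states are present, the positions of the new tokens are offset by `past_key_values_length` so they continue where the cache left off. A tiny illustration (the numbers are arbitrary):

```python
import torch

max_position_embeddings, seq_length, past_len = 512, 3, 4
position_ids = torch.arange(max_position_embeddings).expand((1, -1))

# With 4 cached tokens, the 3 new tokens get absolute positions 4, 5, 6.
print(position_ids[:, past_len : seq_length + past_len])  # tensor([[4, 5, 6]])
```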
96
+
97
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L97
98
+ class BlipTextSelfAttention(nn.Module):
99
+ def __init__(self, config, is_cross_attention):
100
+ super().__init__()
101
+ self.config = config
102
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
103
+ raise ValueError(
104
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
105
+ % (config.hidden_size, config.num_attention_heads)
106
+ )
107
+
108
+ self.num_attention_heads = config.num_attention_heads
109
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
110
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
111
+
112
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
113
+ if is_cross_attention:
114
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
115
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
116
+ else:
117
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
118
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
119
+
120
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
121
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
122
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
123
+ self.max_position_embeddings = config.max_position_embeddings
124
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
125
+
126
+ def save_attn_gradients(self, attn_gradients):
127
+ self.attn_gradients = attn_gradients
128
+
129
+ def get_attn_gradients(self):
130
+ return self.attn_gradients
131
+
132
+ def save_attention_map(self, attention_map):
133
+ self.attention_map = attention_map
134
+
135
+ def get_attention_map(self):
136
+ return self.attention_map
137
+
138
+ def transpose_for_scores(self, x):
139
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
140
+ x = x.view(*new_x_shape)
141
+ return x.permute(0, 2, 1, 3)
142
+
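`transpose_for_scores` only reshapes the projected hidden states from `(batch, seq_len, hidden)` into per-head slices `(batch, heads, seq_len, head_dim)` so the matrix products below run independently per attention head. A standalone shape check (the sizes are illustrative only):

```python
import torch

batch, seq_len, heads, head_dim = 2, 5, 12, 64
hidden = heads * head_dim  # 768

x = torch.randn(batch, seq_len, hidden)
x = x.view(batch, seq_len, heads, head_dim).permute(0, 2, 1, 3)
print(x.shape)  # torch.Size([2, 12, 5, 64]) -> (batch, heads, seq_len, head_dim)
```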
143
+ def forward(
144
+ self,
145
+ hidden_states: torch.Tensor,
146
+ attention_mask: Optional[torch.FloatTensor] = None,
147
+ head_mask: Optional[torch.FloatTensor] = None,
148
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
149
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
150
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
151
+ output_attentions: Optional[bool] = False,
152
+ ) -> Tuple[torch.Tensor]:
153
+ mixed_query_layer = self.query(hidden_states)
154
+
155
+ # If this is instantiated as a cross-attention module, the keys
156
+ # and values come from an encoder; the attention mask needs to be
157
+ # such that the encoder's padding tokens are not attended to.
158
+ is_cross_attention = encoder_hidden_states is not None
159
+
160
+ if is_cross_attention:
161
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
162
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
163
+ attention_mask = encoder_attention_mask
164
+ elif past_key_value is not None:
165
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
166
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
167
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
168
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
169
+ else:
170
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
171
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
172
+
173
+ query_layer = self.transpose_for_scores(mixed_query_layer)
174
+
175
+ past_key_value = (key_layer, value_layer)
176
+
177
+ # Take the dot product between "query" and "key" to get the raw attention scores.
178
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
179
+
180
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
181
+ seq_length = hidden_states.size()[1]
182
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
183
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
184
+ distance = position_ids_l - position_ids_r
185
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
186
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
187
+
188
+ if self.position_embedding_type == "relative_key":
189
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
190
+ attention_scores = attention_scores + relative_position_scores
191
+ elif self.position_embedding_type == "relative_key_query":
192
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
193
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
194
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
195
+
196
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
197
+ if attention_mask is not None:
198
+ # Apply the attention mask (precomputed for all layers in BlipTextModel forward() function)
199
+ attention_scores = attention_scores + attention_mask.to(attention_scores.device)
200
+
201
+ # Normalize the attention scores to probabilities.
202
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
203
+
204
+ # This is actually dropping out entire tokens to attend to, which might
205
+ # seem a bit unusual, but is taken from the original Transformer paper.
206
+ attention_probs_dropped = self.dropout(attention_probs)
207
+
208
+ # Mask heads if we want to
209
+ if head_mask is not None:
210
+ attention_probs_dropped = attention_probs_dropped * head_mask
211
+
212
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
213
+
214
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
215
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
216
+ context_layer = context_layer.view(*new_context_layer_shape)
217
+
218
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
219
+
220
+ outputs = outputs + (past_key_value,)
221
+ return outputs
222
+
223
+
224
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert -> BlipText
225
+ class BlipTextSelfOutput(nn.Module):
226
+ def __init__(self, config):
227
+ super().__init__()
228
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
229
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
230
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
231
+
232
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
233
+ hidden_states = self.dense(hidden_states)
234
+ hidden_states = self.dropout(hidden_states)
235
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
236
+ return hidden_states
237
+
238
+
239
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#242
240
+ class BlipTextAttention(nn.Module):
241
+ def __init__(self, config, is_cross_attention=False):
242
+ super().__init__()
243
+ self.self = BlipTextSelfAttention(config, is_cross_attention)
244
+ self.output = BlipTextSelfOutput(config)
245
+ self.pruned_heads = set()
246
+
247
+ def prune_heads(self, heads):
248
+ if len(heads) == 0:
249
+ return
250
+ heads, index = find_pruneable_heads_and_indices(
251
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
252
+ )
253
+
254
+ # Prune linear layers
255
+ self.self.query = prune_linear_layer(self.self.query, index)
256
+ self.self.key = prune_linear_layer(self.self.key, index)
257
+ self.self.value = prune_linear_layer(self.self.value, index)
258
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
259
+
260
+ # Update hyper params and store pruned heads
261
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
262
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
263
+ self.pruned_heads = self.pruned_heads.union(heads)
264
+
265
+ def forward(
266
+ self,
267
+ hidden_states: torch.Tensor,
268
+ attention_mask: Optional[torch.FloatTensor] = None,
269
+ head_mask: Optional[torch.FloatTensor] = None,
270
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
271
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
272
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
273
+ output_attentions: Optional[bool] = False,
274
+ ) -> Tuple[torch.Tensor]:
275
+ self_outputs = self.self(
276
+ hidden_states,
277
+ attention_mask,
278
+ head_mask,
279
+ encoder_hidden_states,
280
+ encoder_attention_mask,
281
+ past_key_value,
282
+ output_attentions,
283
+ )
284
+ attention_output = self.output(self_outputs[0], hidden_states)
285
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
286
+ return outputs
287
+
288
+
289
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert -> BlipText
290
+ class BlipTextIntermediate(nn.Module):
291
+ def __init__(self, config):
292
+ super().__init__()
293
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
294
+ if isinstance(config.hidden_act, str):
295
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
296
+ else:
297
+ self.intermediate_act_fn = config.hidden_act
298
+
299
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
300
+ hidden_states = self.dense(hidden_states)
301
+ hidden_states = self.intermediate_act_fn(hidden_states)
302
+ return hidden_states
303
+
304
+
305
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert -> BlipText
306
+ class BlipTextOutput(nn.Module):
307
+ def __init__(self, config):
308
+ super().__init__()
309
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
310
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
311
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
312
+
313
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
314
+ hidden_states = self.dense(hidden_states)
315
+ hidden_states = self.dropout(hidden_states)
316
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
317
+ return hidden_states
318
+
319
+
320
+ class BlipTextLayer(nn.Module):
321
+ def __init__(self, config, layer_num):
322
+ super().__init__()
323
+ self.config = config
324
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
325
+ self.seq_len_dim = 1
326
+ self.attention = BlipTextAttention(config)
327
+ self.layer_num = layer_num
328
+ if self.config.is_decoder:
329
+ self.crossattention = BlipTextAttention(config, is_cross_attention=self.config.is_decoder)
330
+ self.intermediate = BlipTextIntermediate(config)
331
+ self.output = BlipTextOutput(config)
332
+
333
+ def forward(
334
+ self,
335
+ hidden_states: torch.Tensor,
336
+ attention_mask: Optional[torch.FloatTensor] = None,
337
+ head_mask: Optional[torch.FloatTensor] = None,
338
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
339
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
340
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
341
+ output_attentions: Optional[bool] = False,
342
+ ) -> Tuple[torch.Tensor]:
343
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
344
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
345
+ self_attention_outputs = self.attention(
346
+ hidden_states,
347
+ attention_mask,
348
+ head_mask,
349
+ output_attentions=output_attentions,
350
+ past_key_value=self_attn_past_key_value,
351
+ )
352
+ attention_output = self_attention_outputs[0]
353
+
354
+ outputs = self_attention_outputs[1:-1]
355
+ present_key_value = self_attention_outputs[-1]
356
+
357
+ if encoder_hidden_states is not None:
358
+ cross_attention_outputs = self.crossattention(
359
+ attention_output,
360
+ attention_mask,
361
+ head_mask,
362
+ encoder_hidden_states,
363
+ encoder_attention_mask,
364
+ output_attentions=output_attentions,
365
+ )
366
+ attention_output = cross_attention_outputs[0]
367
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
368
+ layer_output = apply_chunking_to_forward(
369
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
370
+ )
371
+ outputs = (layer_output,) + outputs
372
+
373
+ outputs = outputs + (present_key_value,)
374
+
375
+ return outputs
376
+
377
+ def feed_forward_chunk(self, attention_output):
378
+ intermediate_output = self.intermediate(attention_output)
379
+ layer_output = self.output(intermediate_output, attention_output)
380
+ return layer_output
381
+
382
+
383
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L386
384
+ class BlipTextEncoder(nn.Module):
385
+ def __init__(self, config):
386
+ super().__init__()
387
+ self.config = config
388
+ self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)])
389
+ self.gradient_checkpointing = False
390
+
391
+ def forward(
392
+ self,
393
+ hidden_states: torch.Tensor,
394
+ attention_mask: Optional[torch.FloatTensor] = None,
395
+ head_mask: Optional[torch.FloatTensor] = None,
396
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
397
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
398
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
399
+ use_cache: Optional[bool] = None,
400
+ output_attentions: Optional[bool] = False,
401
+ output_hidden_states: Optional[bool] = False,
402
+ return_dict: Optional[bool] = True,
403
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
404
+ if self.gradient_checkpointing and self.training:
405
+ if use_cache:
406
+ logger.warning(
407
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
408
+ )
409
+ use_cache = False
410
+ all_hidden_states = () if output_hidden_states else None
411
+ all_self_attentions = () if output_attentions else None
412
+ all_cross_attentions = () if output_attentions and self.config.is_decoder else None
413
+
414
+ next_decoder_cache = () if use_cache else None
415
+
416
+ for i in range(self.config.num_hidden_layers):
417
+ layer_module = self.layer[i]
418
+ if output_hidden_states:
419
+ all_hidden_states = all_hidden_states + (hidden_states,)
420
+
421
+ layer_head_mask = head_mask[i] if head_mask is not None else None
422
+ past_key_value = past_key_values[i] if past_key_values is not None else None
423
+
424
+ if self.gradient_checkpointing and self.training:
425
+ layer_outputs = self._gradient_checkpointing_func(
426
+ layer_module.__call__,
427
+ hidden_states,
428
+ attention_mask,
429
+ layer_head_mask,
430
+ encoder_hidden_states,
431
+ encoder_attention_mask,
432
+ past_key_value,
433
+ output_attentions,
434
+ )
435
+ else:
436
+ layer_outputs = layer_module(
437
+ hidden_states,
438
+ attention_mask,
439
+ layer_head_mask,
440
+ encoder_hidden_states,
441
+ encoder_attention_mask,
442
+ past_key_value,
443
+ output_attentions,
444
+ )
445
+
446
+ hidden_states = layer_outputs[0]
447
+ if use_cache:
448
+ next_decoder_cache += (layer_outputs[-1],)
449
+ if output_attentions:
450
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
451
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
452
+
453
+ if output_hidden_states:
454
+ all_hidden_states = all_hidden_states + (hidden_states,)
455
+
456
+ if not return_dict:
457
+ return tuple(
458
+ v
459
+ for v in [
460
+ hidden_states,
461
+ next_decoder_cache,
462
+ all_hidden_states,
463
+ all_self_attentions,
464
+ all_cross_attentions,
465
+ ]
466
+ if v is not None
467
+ )
468
+ return BaseModelOutputWithPastAndCrossAttentions(
469
+ last_hidden_state=hidden_states,
470
+ past_key_values=next_decoder_cache,
471
+ hidden_states=all_hidden_states,
472
+ attentions=all_self_attentions,
473
+ cross_attentions=all_cross_attentions,
474
+ )
475
+
476
+
477
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->BlipText
478
+ class BlipTextPooler(nn.Module):
479
+ def __init__(self, config):
480
+ super().__init__()
481
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
482
+ self.activation = nn.Tanh()
483
+
484
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
485
+ # We "pool" the model by simply taking the hidden state corresponding
486
+ # to the first token.
487
+ first_token_tensor = hidden_states[:, 0]
488
+ pooled_output = self.dense(first_token_tensor)
489
+ pooled_output = self.activation(pooled_output)
490
+ return pooled_output
491
+
492
+
493
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BlipText
494
+ class BlipTextPredictionHeadTransform(nn.Module):
495
+ def __init__(self, config):
496
+ super().__init__()
497
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
498
+ if isinstance(config.hidden_act, str):
499
+ self.transform_act_fn = ACT2FN[config.hidden_act]
500
+ else:
501
+ self.transform_act_fn = config.hidden_act
502
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
503
+
504
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
505
+ hidden_states = self.dense(hidden_states)
506
+ hidden_states = self.transform_act_fn(hidden_states)
507
+ hidden_states = self.LayerNorm(hidden_states)
508
+ return hidden_states
509
+
510
+
511
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BlipText
512
+ class BlipTextLMPredictionHead(nn.Module):
513
+ def __init__(self, config):
514
+ super().__init__()
515
+ self.transform = BlipTextPredictionHeadTransform(config)
516
+
517
+ # The output weights are the same as the input embeddings, but there is
518
+ # an output-only bias for each token.
519
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
520
+
521
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
522
+
523
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
524
+ self.decoder.bias = self.bias
525
+
526
+ def forward(self, hidden_states):
527
+ hidden_states = self.transform(hidden_states)
528
+ hidden_states = self.decoder(hidden_states)
529
+ return hidden_states
530
+
531
+
532
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BlipText
533
+ class BlipTextOnlyMLMHead(nn.Module):
534
+ def __init__(self, config):
535
+ super().__init__()
536
+ self.predictions = BlipTextLMPredictionHead(config)
537
+
538
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
539
+ prediction_scores = self.predictions(sequence_output)
540
+ return prediction_scores
541
+
542
+
543
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L548
544
+ class BlipTextPreTrainedModel(PreTrainedModel):
545
+ """
546
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
547
+ models.
548
+ """
549
+
550
+ config_class = BlipTextConfig
551
+ base_model_prefix = "bert"
552
+
553
+ def _init_weights(self, module):
554
+ """Initialize the weights"""
555
+ if isinstance(module, (nn.Linear, nn.Embedding)):
556
+ # Slightly different from the TF version which uses truncated_normal for initialization
557
+ # cf https://github.com/pytorch/pytorch/pull/5617
558
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
559
+ elif isinstance(module, nn.LayerNorm):
560
+ module.bias.data.zero_()
561
+ module.weight.data.fill_(1.0)
562
+ if isinstance(module, nn.Linear) and module.bias is not None:
563
+ module.bias.data.zero_()
564
+
565
+
566
+ # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571
567
+ class BlipTextModel(BlipTextPreTrainedModel):
568
+ """
569
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
570
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
571
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
572
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To be used as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`; an
573
+ `encoder_hidden_states` is then expected as an input to the forward pass.
574
+ """
575
+
576
+ def __init__(self, config, add_pooling_layer=True):
577
+ super().__init__(config)
578
+ self.config = config
579
+
580
+ self.embeddings = BlipTextEmbeddings(config)
581
+ self.encoder = BlipTextEncoder(config)
582
+ self.pooler = BlipTextPooler(config) if add_pooling_layer else None
583
+
584
+ self.post_init()
585
+
586
+ def get_input_embeddings(self):
587
+ return self.embeddings.word_embeddings
588
+
589
+ def set_input_embeddings(self, value):
590
+ self.embeddings.word_embeddings = value
591
+
592
+ # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
593
+ def _prune_heads(self, heads_to_prune):
594
+ """
595
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
596
+ class PreTrainedModel
597
+ """
598
+ for layer, heads in heads_to_prune.items():
599
+ self.encoder.layer[layer].attention.prune_heads(heads)
600
+
601
+ def get_extended_attention_mask(
602
+ self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool
603
+ ) -> Tensor:
604
+ """
605
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
606
+
607
+ Arguments:
608
+ attention_mask (`torch.Tensor`):
609
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
610
+ input_shape (`Tuple[int]`):
611
+ The shape of the input to the model.
612
+ device (`torch.device`):
613
+ The device of the input to the model.
614
+
615
+ Returns:
616
+ `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
617
+ """
618
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
619
+ # ourselves in which case we just need to make it broadcastable to all heads.
620
+ if attention_mask.dim() == 3:
621
+ extended_attention_mask = attention_mask[:, None, :, :]
622
+ elif attention_mask.dim() == 2:
623
+ # Provided a padding mask of dimensions [batch_size, seq_length]
624
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
625
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
626
+ if is_decoder:
627
+ batch_size, seq_length = input_shape
628
+
629
+ seq_ids = torch.arange(seq_length, device=device)
630
+ causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
631
+ # in case past_key_values are used we need to add a prefix ones mask to the causal mask
632
+ # causal and attention masks must have same type with pytorch version < 1.3
633
+ causal_mask = causal_mask.to(attention_mask.dtype)
634
+
635
+ if causal_mask.shape[1] < attention_mask.shape[1]:
636
+ prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
637
+ causal_mask = torch.cat(
638
+ [
639
+ torch.ones(
640
+ (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
641
+ ),
642
+ causal_mask,
643
+ ],
644
+ axis=-1,
645
+ )
646
+
647
+ extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
648
+ else:
649
+ extended_attention_mask = attention_mask[:, None, None, :]
650
+ else:
651
+ raise ValueError(
652
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
653
+ input_shape, attention_mask.shape
654
+ )
655
+ )
656
+
657
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
658
+ # masked positions, this operation will create a tensor which is 0.0 for
659
+ # positions we want to attend and -10000.0 for masked positions.
660
+ # Since we are adding it to the raw scores before the softmax, this is
661
+ # effectively the same as removing these entirely.
662
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
663
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
664
+ return extended_attention_mask
665
+
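`get_extended_attention_mask` turns a 2D padding mask into a 4D additive mask; in the decoder branch it also folds in a causal mask before mapping kept positions to `0.0` and masked positions to `-10000.0`. A small standalone illustration of that branch (shapes and values only, not tied to any checkpoint):

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])  # batch of 1, last token is padding
batch_size, seq_length = attention_mask.shape

seq_ids = torch.arange(seq_length)
causal_mask = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]).long()

extended = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
extended = (1.0 - extended.float()) * -10000.0
print(extended.shape)  # torch.Size([1, 1, 4, 4])
print(extended[0, 0])  # 0.0 where attention is allowed, -10000.0 elsewhere
```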
666
+ def forward(
667
+ self,
668
+ input_ids: Optional[torch.Tensor] = None,
669
+ attention_mask: Optional[torch.Tensor] = None,
670
+ position_ids: Optional[torch.Tensor] = None,
671
+ head_mask: Optional[torch.Tensor] = None,
672
+ inputs_embeds: Optional[torch.Tensor] = None,
673
+ encoder_embeds: Optional[torch.Tensor] = None,
674
+ encoder_hidden_states: Optional[torch.Tensor] = None,
675
+ encoder_attention_mask: Optional[torch.Tensor] = None,
676
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
677
+ use_cache: Optional[bool] = None,
678
+ output_attentions: Optional[bool] = None,
679
+ output_hidden_states: Optional[bool] = None,
680
+ return_dict: Optional[bool] = None,
681
+ is_decoder: Optional[bool] = False,
682
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
683
+ r"""
684
+ encoder_hidden_states (`torch.FloatTensor`, *optional*):
685
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
686
+ the model is configured as a decoder.
687
+ encoder_attention_mask (`torch.FloatTensor`, *optional*):
688
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
689
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
690
+ - 1 for tokens that are **not masked**,
691
+ - 0 for tokens that are **masked**.
692
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*):
693
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
694
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
695
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
696
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
697
+ use_cache (`bool`, *optional*):
698
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
699
+ `past_key_values`).
700
+ """
701
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
702
+ output_hidden_states = (
703
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
704
+ )
705
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
706
+
707
+ if is_decoder:
708
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
709
+ else:
710
+ use_cache = False
711
+
712
+ if input_ids is not None and inputs_embeds is not None:
713
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
714
+ elif input_ids is not None:
715
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
716
+ input_shape = input_ids.size()
717
+ batch_size, seq_length = input_shape
718
+ device = input_ids.device
719
+ elif inputs_embeds is not None:
720
+ input_shape = inputs_embeds.size()[:-1]
721
+ batch_size, seq_length = input_shape
722
+ device = inputs_embeds.device
723
+ elif encoder_embeds is not None:
724
+ input_shape = encoder_embeds.size()[:-1]
725
+ batch_size, seq_length = input_shape
726
+ device = encoder_embeds.device
727
+ else:
728
+ raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
729
+
730
+ # past_key_values_length
731
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
732
+
733
+ if attention_mask is None:
734
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length)).to(device)
735
+
736
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
737
+ # ourselves in which case we just need to make it broadcastable to all heads.
738
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
739
+ attention_mask, input_shape, device, is_decoder
740
+ )
741
+
742
+ # If a 2D or 3D attention mask is provided for the cross-attention
743
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
744
+ if encoder_hidden_states is not None:
745
+ if isinstance(encoder_hidden_states, list):
746
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
747
+ else:
748
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
749
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
750
+
751
+ if isinstance(encoder_attention_mask, list):
752
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
753
+ elif encoder_attention_mask is None:
754
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
755
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
756
+ else:
757
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
758
+ else:
759
+ encoder_extended_attention_mask = None
760
+
761
+ # Prepare head mask if needed
762
+ # 1.0 in head_mask indicate we keep the head
763
+ # attention_probs has shape bsz x n_heads x N x N
764
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
765
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
766
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
767
+
768
+ if encoder_embeds is None:
769
+ embedding_output = self.embeddings(
770
+ input_ids=input_ids,
771
+ position_ids=position_ids,
772
+ inputs_embeds=inputs_embeds,
773
+ past_key_values_length=past_key_values_length,
774
+ )
775
+ else:
776
+ embedding_output = encoder_embeds
777
+
778
+ encoder_outputs = self.encoder(
779
+ embedding_output,
780
+ attention_mask=extended_attention_mask,
781
+ head_mask=head_mask,
782
+ encoder_hidden_states=encoder_hidden_states,
783
+ encoder_attention_mask=encoder_extended_attention_mask,
784
+ past_key_values=past_key_values,
785
+ use_cache=use_cache,
786
+ output_attentions=output_attentions,
787
+ output_hidden_states=output_hidden_states,
788
+ return_dict=return_dict,
789
+ )
790
+ sequence_output = encoder_outputs[0]
791
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
792
+
793
+ if not return_dict:
794
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
795
+
796
+ return BaseModelOutputWithPoolingAndCrossAttentions(
797
+ last_hidden_state=sequence_output,
798
+ pooler_output=pooled_output,
799
+ past_key_values=encoder_outputs.past_key_values,
800
+ hidden_states=encoder_outputs.hidden_states,
801
+ attentions=encoder_outputs.attentions,
802
+ cross_attentions=encoder_outputs.cross_attentions,
803
+ )
804
+
805
+
806
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L811
807
+ class BlipTextLMHeadModel(BlipTextPreTrainedModel):
808
+ def __init__(self, config):
809
+ super().__init__(config)
810
+
811
+ self.bert = BlipTextModel(config, add_pooling_layer=False)
812
+ self.cls = BlipTextOnlyMLMHead(config)
813
+ self.label_smoothing = config.label_smoothing
814
+
815
+ def get_output_embeddings(self):
816
+ return self.cls.predictions.decoder
817
+
818
+ def set_output_embeddings(self, new_embeddings):
819
+ self.cls.predictions.decoder = new_embeddings
820
+
821
+ def forward(
822
+ self,
823
+ input_ids: Optional[torch.Tensor] = None,
824
+ attention_mask: Optional[torch.Tensor] = None,
825
+ position_ids: Optional[torch.Tensor] = None,
826
+ head_mask: Optional[torch.Tensor] = None,
827
+ inputs_embeds: Optional[torch.Tensor] = None,
828
+ encoder_hidden_states: Optional[torch.Tensor] = None,
829
+ encoder_attention_mask: Optional[torch.Tensor] = None,
830
+ labels: Optional[torch.Tensor] = None,
831
+ past_key_values: Optional[List[torch.Tensor]] = None,
832
+ use_cache: Optional[bool] = None,
833
+ output_attentions: Optional[bool] = None,
834
+ output_hidden_states: Optional[bool] = None,
835
+ return_dict: Optional[bool] = None,
836
+ return_logits: Optional[bool] = False,
837
+ is_decoder: Optional[bool] = True,
838
+ reduction: Optional[str] = "mean",
839
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
840
+ r"""
841
+ encoder_hidden_states (`torch.FloatTensor`, *optional*):
842
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is
843
+ configured as a decoder.
844
+ encoder_attention_mask (`torch.FloatTensor`, *optional*):
845
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
846
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
847
+ - 1 for tokens that are **not masked**,
848
+ - 0 for tokens that are **masked**.
849
+ labels (`torch.LongTensor`, *optional*):
850
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
851
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
852
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
853
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*):
854
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
855
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
856
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
857
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
858
+ use_cache (`bool`, *optional*):
859
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
860
+ `past_key_values`).
861
+ """
862
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
863
+ if labels is not None:
864
+ use_cache = False
865
+
866
+ outputs = self.bert(
867
+ input_ids,
868
+ attention_mask=attention_mask,
869
+ position_ids=position_ids,
870
+ head_mask=head_mask,
871
+ inputs_embeds=inputs_embeds,
872
+ encoder_hidden_states=encoder_hidden_states,
873
+ encoder_attention_mask=encoder_attention_mask,
874
+ past_key_values=past_key_values,
875
+ use_cache=use_cache,
876
+ output_attentions=output_attentions,
877
+ output_hidden_states=output_hidden_states,
878
+ return_dict=return_dict,
879
+ is_decoder=is_decoder,
880
+ )
881
+
882
+ sequence_output = outputs[0]
883
+ prediction_scores = self.cls(sequence_output)
884
+
885
+ if return_logits:
886
+ return prediction_scores[:, :-1, :].contiguous()
887
+
888
+ lm_loss = None
889
+ if labels is not None:
890
+ # we are doing next-token prediction; shift prediction scores and input ids by one
891
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
892
+ labels = labels[:, 1:].contiguous().to(shifted_prediction_scores.device)
893
+ loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=self.label_smoothing)
894
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
895
+ if reduction == "none":
896
+ lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
897
+
898
+ if not return_dict:
899
+ output = (prediction_scores,) + outputs[2:]
900
+ return ((lm_loss,) + output) if lm_loss is not None else output
901
+
902
+ return CausalLMOutputWithCrossAttentions(
903
+ loss=lm_loss,
904
+ logits=prediction_scores,
905
+ past_key_values=outputs.past_key_values,
906
+ hidden_states=outputs.hidden_states,
907
+ attentions=outputs.attentions,
908
+ cross_attentions=outputs.cross_attentions,
909
+ )
910
+
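The loss computation above is standard next-token prediction: the logits at position `t` are scored against the label at position `t + 1`, so both tensors are shifted by one before the cross-entropy. A minimal standalone illustration with made-up sizes:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, batch, seq_len = 11, 2, 6
prediction_scores = torch.randn(batch, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch, seq_len))

shifted_scores = prediction_scores[:, :-1, :].contiguous()  # drop the last position
shifted_labels = labels[:, 1:].contiguous()                 # drop the first position

loss_fct = CrossEntropyLoss(reduction="mean", label_smoothing=0.1)
loss = loss_fct(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(loss)
```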
911
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
912
+ input_shape = input_ids.shape
913
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
914
+ if attention_mask is None:
915
+ attention_mask = input_ids.new_ones(input_shape)
916
+
917
+ # cut decoder_input_ids if past_key_values is used
918
+ if past_key_values is not None:
919
+ past_length = past_key_values[0][0].shape[2]
920
+
921
+ # Some generation methods already pass only the last input ID
922
+ if input_ids.shape[1] > past_length:
923
+ remove_prefix_length = past_length
924
+ else:
925
+ # Default to old behavior: keep only final ID
926
+ remove_prefix_length = input_ids.shape[1] - 1
927
+
928
+ input_ids = input_ids[:, remove_prefix_length:]
929
+
930
+ return {
931
+ "input_ids": input_ids,
932
+ "attention_mask": attention_mask,
933
+ "past_key_values": past_key_values,
934
+ "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
935
+ "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
936
+ "is_decoder": True,
937
+ }
938
+
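`prepare_inputs_for_generation` trims away the prefix already covered by the key/value cache, so during incremental decoding only the tokens not yet cached are re-fed to the model. A toy illustration of the trimming logic (the token ids are made up):

```python
import torch

input_ids = torch.tensor([[101, 2054, 2003, 2023]])  # 4 tokens generated so far
past_length = 3                                       # 3 of them are already cached

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    # keep only the final id (old behavior)
    remove_prefix_length = input_ids.shape[1] - 1

print(input_ids[:, remove_prefix_length:])  # tensor([[2023]]): only the new token is fed
```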
939
+ def _reorder_cache(self, past_key_values, beam_idx):
940
+ reordered_past = ()
941
+ for layer_past in past_key_values:
942
+ reordered_past += (
943
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
944
+ )
945
+ return reordered_past
venv/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py ADDED
@@ -0,0 +1,1701 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TensorFlow BLIP model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ import warnings
20
+ from dataclasses import dataclass
21
+ from typing import Any, Optional, Tuple, Union
22
+
23
+ import tensorflow as tf
24
+
25
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
26
+ from ...modeling_tf_utils import (
27
+ TFPreTrainedModel,
28
+ get_initializer,
29
+ get_tf_activation,
30
+ keras,
31
+ keras_serializable,
32
+ shape_list,
33
+ unpack_inputs,
34
+ )
35
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
44
+ from .modeling_tf_blip_text import BLIP_TEXT_INPUTS_DOCSTRING, TFBlipTextLMHeadModel, TFBlipTextModel
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
50
+
51
+
52
+ from ..deprecated._archive_maps import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # Copied from transformers.models.clip.modeling_tf_clip.contrastive_loss
56
+ def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
57
+ return tf.math.reduce_mean(
58
+ keras.metrics.sparse_categorical_crossentropy(
59
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
60
+ )
61
+ )
62
+
63
+
64
+ # Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->blip
65
+ def blip_loss(similarity: tf.Tensor) -> tf.Tensor:
66
+ caption_loss = contrastive_loss(similarity)
67
+ image_loss = contrastive_loss(tf.transpose(similarity))
68
+ return (caption_loss + image_loss) / 2.0
69
+
70
+
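+ # Illustrative sketch (toy values, not library output): for a batch of N matched image-text pairs,
+ # `similarity` is an (N, N) matrix of scaled similarities and the correct targets are the diagonal,
+ # i.e. `tf.range(N)`. For example:
+ # sims = tf.constant([[5.0, 0.1], [0.2, 4.0]])  # hypothetical logits for 2 pairs
+ # loss = blip_loss(sims)  # mean of the caption->image and image->caption cross-entropies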
71
+ @dataclass
72
+ class TFBlipForConditionalGenerationModelOutput(ModelOutput):
73
+ """
74
+ Adapted from the base class for vision model's outputs that also contains image embeddings obtained by pooling the
75
+ last hidden states. This class also adds the loss term from the text decoder.
76
+
77
+ Args:
78
+ loss (`tf.Tensor`, *optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
79
+ Language modeling loss from the text decoder.
80
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
81
+ Prediction scores of the language modeling head of the text decoder model.
82
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*):
83
+ The image embeddings obtained after applying the Vision Transformer model to the input image.
84
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
85
+ Sequence of hidden-states at the output of the last layer of the model.
86
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True`):
87
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
88
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
89
+
90
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
91
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed):
92
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
93
+ sequence_length)`.
94
+
95
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
96
+ heads.
97
+ """
98
+
99
+ loss: Tuple[tf.Tensor] | None = None
100
+ logits: Tuple[tf.Tensor] | None = None
101
+ image_embeds: tf.Tensor | None = None
102
+ last_hidden_state: tf.Tensor = None
103
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
104
+ attentions: Tuple[tf.Tensor, ...] | None = None
105
+
106
+ @property
107
+ def decoder_logits(self):
108
+ warnings.warn(
109
+ "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
110
+ " Please use the `logits` attribute to retrieve the final output instead.",
111
+ FutureWarning,
112
+ )
113
+ return self.logits
114
+
115
+
116
+ @dataclass
117
+ class TFBlipTextVisionModelOutput(ModelOutput):
118
+ """
119
+ Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
120
+ last hidden states. This class also adds the loss term from the text decoder.
121
+
122
+ Args:
123
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
124
+ Language modeling loss from the text decoder.
125
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
126
+ The image embeddings obtained by applying the projection layer to the pooler_output.
127
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
128
+ Sequence of hidden-states at the output of the last layer of the model.
129
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
130
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
131
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
132
+
133
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
134
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
135
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
136
+ sequence_length)`.
137
+
138
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
139
+ heads.
140
+ """
141
+
142
+ loss: tf.Tensor | None = None
143
+ image_embeds: tf.Tensor | None = None
144
+ last_hidden_state: tf.Tensor = None
145
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
146
+ attentions: Tuple[tf.Tensor, ...] | None = None
147
+
148
+
149
+ @dataclass
150
+ class TFBlipImageTextMatchingModelOutput(ModelOutput):
151
+ """
152
+ Adapted from the base class for vision model's outputs that also contains image embeddings obtained by pooling the
153
+ last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
154
+ scores.
155
+
156
+ Args:
157
+ itm_score (`tf.Tensor`):
158
+ The image-text similarity scores.
159
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
160
+ Language modeling loss from the text decoder.
161
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
162
+ The image embeddings obtained by applying the projection layer to the pooler_output.
163
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
164
+ Sequence of hidden-states at the output of the last layer of the model.
165
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
166
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
167
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
168
+
169
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
170
+ vision_pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`, *optional*):
171
+ Last layer hidden-state of the vision-only branch of the model.
172
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
173
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
174
+ sequence_length)`.
175
+
176
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
177
+ heads.
178
+ question_embeds (`tf.Tensor`):
179
+ The question embeddings obtained by the text projection layer.
180
+ """
181
+
182
+ itm_score: tf.Tensor | None = None
183
+ loss: tf.Tensor | None = None
184
+ image_embeds: tf.Tensor | None = None
185
+ last_hidden_state: tf.Tensor = None
186
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
187
+ vision_pooler_output: tf.Tensor | None = None
188
+ attentions: Tuple[tf.Tensor, ...] | None = None
189
+ question_embeds: Tuple[tf.Tensor] | None = None
190
+
191
+
192
+ @dataclass
193
+ class TFBlipOutput(ModelOutput):
194
+ """
195
+ Args:
196
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
197
+ Contrastive loss for image-text similarity.
198
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
199
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
200
+ similarity scores.
201
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
202
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
203
+ similarity scores.
204
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
205
+ The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
206
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
207
+ The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
208
+ text_model_output (`BaseModelOutputWithPooling`):
209
+ The output of the [`BlipTextModel`].
210
+ vision_model_output (`BaseModelOutputWithPooling`):
211
+ The output of the [`BlipVisionModel`].
212
+ """
213
+
214
+ loss: tf.Tensor | None = None
215
+ logits_per_image: tf.Tensor = None
216
+ logits_per_text: tf.Tensor = None
217
+ text_embeds: tf.Tensor = None
218
+ image_embeds: tf.Tensor = None
219
+ text_model_output: TFBaseModelOutputWithPooling = None
220
+ vision_model_output: TFBaseModelOutputWithPooling = None
221
+
222
+ def to_tuple(self) -> Tuple[Any]:
223
+ return tuple(
224
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
225
+ for k in self.keys()
226
+ )
227
+
228
+
229
+ class TFBlipVisionEmbeddings(keras.layers.Layer):
230
+ def __init__(self, config: BlipVisionConfig, **kwargs):
231
+ super().__init__(**kwargs)
232
+ self.config = config
233
+ self.embed_dim = config.hidden_size
234
+ self.image_size = config.image_size
235
+ self.patch_size = config.patch_size
236
+
237
+ self.patch_embedding = keras.layers.Conv2D(
238
+ filters=self.embed_dim,
239
+ kernel_size=self.patch_size,
240
+ strides=self.patch_size,
241
+ kernel_initializer=get_initializer(self.config.initializer_range),
242
+ data_format="channels_last",
243
+ name="patch_embedding",
244
+ )
245
+
246
+ self.num_patches = (self.image_size // self.patch_size) ** 2
247
+ self.num_positions = self.num_patches + 1
248
+
249
+ def build(self, input_shape=None):
250
+ self.class_embedding = self.add_weight(
251
+ shape=(1, 1, self.embed_dim),
252
+ initializer=get_initializer(self.config.initializer_range),
253
+ trainable=True,
254
+ name="class_embedding",
255
+ )
256
+
257
+ self.position_embedding = self.add_weight(
258
+ shape=(1, self.num_positions, self.embed_dim),
259
+ initializer=get_initializer(self.config.initializer_range),
260
+ trainable=True,
261
+ name="position_embedding",
262
+ )
263
+
264
+ if self.built:
265
+ return
266
+ self.built = True
267
+ if getattr(self, "patch_embedding", None) is not None:
268
+ with tf.name_scope(self.patch_embedding.name):
269
+ self.patch_embedding.build([None, None, None, 3])
270
+
271
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
272
+ # Input is channels-first, we transpose. PyTorch transposes after the conv because PyTorch
273
+ # likes channels-first convs.
274
+ batch_size = tf.shape(pixel_values)[0]
275
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
276
+ patch_embeds = self.patch_embedding(pixel_values)
277
+ patch_embeds = tf.reshape(patch_embeds, (batch_size, self.num_patches, -1))
278
+
279
+ class_embeds = tf.broadcast_to(self.class_embedding, (batch_size, 1, self.embed_dim))
280
+ embeddings = tf.concat([class_embeds, patch_embeds], axis=1)
281
+ embeddings = embeddings + self.position_embedding[:, : tf.shape(embeddings)[1], :]
282
+ return embeddings
283
+
284
+
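+ # Shape sketch for the vision embeddings above, assuming the common BLIP setting of 384x384 images
+ # with 16x16 patches (illustrative numbers; the actual values come from `BlipVisionConfig`):
+ # (24 * 24) = 576 patch embeddings, plus one learned [CLS] embedding, gives 577 positions, so the
+ # output of `call` has shape (batch_size, 577, hidden_size).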
285
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->Blip
286
+ class TFBlipTextEmbeddings(keras.layers.Layer):
287
+ def __init__(self, config: BlipTextConfig, **kwargs):
288
+ super().__init__(**kwargs)
289
+
290
+ self.embed_dim = config.hidden_size
291
+
292
+ self.config = config
293
+
294
+ def build(self, input_shape: tf.TensorShape = None):
295
+ with tf.name_scope("token_embedding"):
296
+ self.weight = self.add_weight(
297
+ shape=(self.config.vocab_size, self.embed_dim),
298
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
299
+ trainable=True,
300
+ name="weight",
301
+ )
302
+
303
+ with tf.name_scope("position_embedding"):
304
+ self.position_embedding = self.add_weight(
305
+ shape=(self.config.max_position_embeddings, self.embed_dim),
306
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
307
+ trainable=True,
308
+ name="embeddings",
309
+ )
310
+
311
+ super().build(input_shape)
312
+
313
+ def call(
314
+ self,
315
+ input_ids: tf.Tensor = None,
316
+ position_ids: tf.Tensor = None,
317
+ inputs_embeds: tf.Tensor = None,
318
+ ) -> tf.Tensor:
319
+ """
320
+ Applies embedding based on inputs tensor.
321
+
322
+ Returns:
323
+ final_embeddings (`tf.Tensor`): output embedding tensor.
324
+ """
325
+ if input_ids is None and inputs_embeds is None:
326
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
327
+
328
+ if inputs_embeds is None:
329
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
330
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
331
+
332
+ input_shape = shape_list(inputs_embeds)[:-1]
333
+
334
+ if position_ids is None:
335
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
336
+
337
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
338
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
339
+ final_embeddings = inputs_embeds + position_embeds
340
+
341
+ return final_embeddings
342
+
343
+
344
+ class TFBlipAttention(keras.layers.Layer):
345
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
346
+
347
+ def __init__(self, config, **kwargs):
348
+ super().__init__(**kwargs)
349
+ self.config = config
350
+ self.embed_dim = config.hidden_size
351
+ self.num_heads = config.num_attention_heads
352
+ self.head_dim = self.embed_dim // self.num_heads
353
+ if self.head_dim * self.num_heads != self.embed_dim:
354
+ raise ValueError(
355
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
356
+ f" {self.num_heads})."
357
+ )
358
+ self.scale = self.head_dim**-0.5
359
+ self.dropout = keras.layers.Dropout(config.attention_dropout, name="dropout")
360
+
361
+ self.qkv = keras.layers.Dense(
362
+ 3 * self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="qkv"
363
+ )
364
+
365
+ self.projection = keras.layers.Dense(
366
+ self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="projection"
367
+ )
368
+
369
+ def call(
370
+ self,
371
+ hidden_states: tf.Tensor,
372
+ head_mask: tf.Tensor | None = None,
373
+ output_attentions: Optional[bool] = False,
374
+ training: Optional[bool] = None,
375
+ ) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[tf.Tensor] | None]:
376
+ """Input shape: Batch x Time x Channel"""
377
+
378
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
379
+
380
+ mixed_qkv = self.qkv(hidden_states)
381
+ mixed_qkv = tf.reshape(mixed_qkv, (bsz, tgt_len, 3, self.num_heads, self.head_dim))
382
+ mixed_qkv = tf.transpose(mixed_qkv, perm=(2, 0, 3, 1, 4))
383
+
384
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
385
+
386
+ # Take the dot product between "query" and "key" to get the raw attention scores.
387
+ attention_scores = query_states @ tf.transpose(key_states, (0, 1, 3, 2))
388
+
389
+ attention_scores = attention_scores * self.scale
390
+
391
+ # Normalize the attention scores to probabilities.
392
+ attention_probs = stable_softmax(attention_scores, axis=-1)
393
+
394
+ # This is actually dropping out entire tokens to attend to, which might
395
+ # seem a bit unusual, but is taken from the original Transformer paper.
396
+ attention_probs = self.dropout(attention_probs, training=training)
397
+
398
+ # Mask heads if we want to
399
+ if head_mask is not None:
400
+ attention_probs = attention_probs * head_mask
401
+
402
+ context_layer = tf.transpose(attention_probs @ value_states, perm=(0, 2, 1, 3))
403
+
404
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.embed_dim]
405
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
406
+
407
+ output = self.projection(context_layer)
408
+
409
+ outputs = (output, attention_probs) if output_attentions else (output, None)
410
+
411
+ return outputs
412
+
413
+ def build(self, input_shape=None):
414
+ if self.built:
415
+ return
416
+ self.built = True
417
+ if getattr(self, "dropout", None) is not None:
418
+ with tf.name_scope(self.dropout.name):
419
+ self.dropout.build(None)
420
+ if getattr(self, "qkv", None) is not None:
421
+ with tf.name_scope(self.qkv.name):
422
+ self.qkv.build([None, None, self.embed_dim])
423
+ if getattr(self, "projection", None) is not None:
424
+ with tf.name_scope(self.projection.name):
425
+ self.projection.build([None, None, self.embed_dim])
426
+
427
+
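+ # Shape walk-through for the fused attention above (illustrative, not an API guarantee):
+ # `qkv` maps (bsz, tgt_len, embed_dim) -> (bsz, tgt_len, 3 * embed_dim); the reshape/transpose then
+ # yields queries, keys and values of shape (bsz, num_heads, tgt_len, head_dim). Scores are softmaxed
+ # over the key axis and the projected context is reshaped back to (bsz, tgt_len, embed_dim).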
428
+ class TFBlipMLP(keras.layers.Layer):
429
+ def __init__(self, config: BlipConfig, **kwargs):
430
+ super().__init__(**kwargs)
431
+
432
+ self.activation_fn = get_tf_activation(config.hidden_act)
433
+
434
+ in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5)
435
+ fc_std = (2 * config.hidden_size) ** -0.5
436
+
437
+ self.fc1 = keras.layers.Dense(
438
+ units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1"
439
+ )
440
+ self.fc2 = keras.layers.Dense(
441
+ units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2"
442
+ )
443
+ self.config = config
444
+
445
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
446
+ hidden_states = self.fc1(inputs=hidden_states)
447
+ hidden_states = self.activation_fn(hidden_states)
448
+ hidden_states = self.fc2(inputs=hidden_states)
449
+ return hidden_states
450
+
451
+ def build(self, input_shape=None):
452
+ if self.built:
453
+ return
454
+ self.built = True
455
+ if getattr(self, "fc1", None) is not None:
456
+ with tf.name_scope(self.fc1.name):
457
+ self.fc1.build([None, None, self.config.hidden_size])
458
+ if getattr(self, "fc2", None) is not None:
459
+ with tf.name_scope(self.fc2.name):
460
+ self.fc2.build([None, None, self.config.intermediate_size])
461
+
462
+
463
+ class TFBlipEncoderLayer(keras.layers.Layer):
464
+ def __init__(self, config: BlipConfig, **kwargs):
465
+ super().__init__(**kwargs)
466
+ self.embed_dim = config.hidden_size
467
+ self.self_attn = TFBlipAttention(config, name="self_attn")
468
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
469
+ self.mlp = TFBlipMLP(config, name="mlp")
470
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
471
+
472
+ def call(
473
+ self,
474
+ hidden_states: tf.Tensor,
475
+ attention_mask: tf.Tensor,
476
+ output_attentions: Optional[bool] = False,
477
+ training: Optional[bool] = None,
478
+ ) -> Tuple[tf.Tensor]:
479
+ """
480
+ Args:
481
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
482
+ attention_mask (`tf.Tensor`): attention mask of size
483
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
484
+
485
+ output_attentions (`bool`, *optional*):
486
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
487
+ returned tensors for more detail.
488
+ """
489
+ residual = hidden_states
490
+
491
+ hidden_states = self.layer_norm1(hidden_states)
492
+ hidden_states, attn_weights = self.self_attn(
493
+ hidden_states=hidden_states,
494
+ head_mask=attention_mask,
495
+ output_attentions=output_attentions,
496
+ training=training,
497
+ )
498
+ hidden_states = hidden_states + residual
499
+ residual = hidden_states
500
+ hidden_states = self.layer_norm2(hidden_states)
501
+ hidden_states = self.mlp(hidden_states)
502
+
503
+ hidden_states = hidden_states + residual
504
+
505
+ outputs = (hidden_states,)
506
+
507
+ if output_attentions:
508
+ outputs += (attn_weights,)
509
+
510
+ return outputs
511
+
512
+ def build(self, input_shape=None):
513
+ if self.built:
514
+ return
515
+ self.built = True
516
+ if getattr(self, "self_attn", None) is not None:
517
+ with tf.name_scope(self.self_attn.name):
518
+ self.self_attn.build(None)
519
+ if getattr(self, "layer_norm1", None) is not None:
520
+ with tf.name_scope(self.layer_norm1.name):
521
+ self.layer_norm1.build([None, None, self.embed_dim])
522
+ if getattr(self, "mlp", None) is not None:
523
+ with tf.name_scope(self.mlp.name):
524
+ self.mlp.build(None)
525
+ if getattr(self, "layer_norm2", None) is not None:
526
+ with tf.name_scope(self.layer_norm2.name):
527
+ self.layer_norm2.build([None, None, self.embed_dim])
528
+
529
+
530
+ class TFBlipPreTrainedModel(TFPreTrainedModel):
531
+ """
532
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
533
+ models.
534
+ """
535
+
536
+ config_class = BlipConfig
537
+ base_model_prefix = "blip"
538
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
539
+
540
+
541
+ BLIP_START_DOCSTRING = r"""
542
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
543
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
544
+ etc.)
545
+
546
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
547
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
548
+ behavior.
549
+
550
+ Parameters:
551
+ config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
552
+ Initializing with a config file does not load the weights associated with the model, only the
553
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
554
+ """
555
+
556
+ BLIP_VISION_INPUTS_DOCSTRING = r"""
557
+ Args:
558
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
559
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
560
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
561
+ output_attentions (`bool`, *optional*):
562
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
563
+ tensors for more detail.
564
+ output_hidden_states (`bool`, *optional*):
565
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
566
+ more detail.
567
+ return_dict (`bool`, *optional*):
568
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
569
+ """
570
+
571
+ BLIP_INPUTS_DOCSTRING = r"""
572
+ Args:
573
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
574
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
575
+ it.
576
+
577
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
578
+
579
+ [What are input IDs?](../glossary#input-ids)
580
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
581
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
582
+
583
+ - 1 for tokens that are **not masked**,
584
+ - 0 for tokens that are **masked**.
585
+
586
+ [What are attention masks?](../glossary#attention-mask)
587
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
588
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
589
+ config.max_position_embeddings - 1]`.
590
+
591
+ [What are position IDs?](../glossary#position-ids)
592
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
593
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
594
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
595
+ return_loss (`bool`, *optional*):
596
+ Whether or not to return the contrastive loss.
597
+ output_attentions (`bool`, *optional*):
598
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
599
+ tensors for more detail.
600
+ output_hidden_states (`bool`, *optional*):
601
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
602
+ more detail.
603
+ return_dict (`bool`, *optional*):
604
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
605
+ """
606
+
607
+
608
+ @keras_serializable
609
+ class TFBlipEncoder(keras.layers.Layer):
610
+ config_class = BlipConfig
611
+ """
612
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
613
+ [`BlipEncoderLayer`].
614
+
615
+ Args:
616
+ config (`BlipConfig`):
617
+ The corresponding vision configuration for the `BlipEncoder`.
618
+ """
619
+
620
+ def __init__(self, config: BlipConfig, **kwargs):
621
+ super().__init__(**kwargs)
622
+ self.config = config
623
+ self.layers = [TFBlipEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
624
+
625
+ @unpack_inputs
626
+ def call(
627
+ self,
628
+ inputs_embeds,
629
+ attention_mask: tf.Tensor | None = None,
630
+ output_attentions: Optional[bool] = None,
631
+ output_hidden_states: Optional[bool] = None,
632
+ return_dict: Optional[bool] = None,
633
+ training: Optional[bool] = None,
634
+ ) -> Union[Tuple, TFBaseModelOutput]:
635
+ r"""
636
+ Args:
637
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
638
+ Embedded representation of the inputs. Should be float, not int tokens.
639
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
640
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
641
+
642
+ - 1 for tokens that are **not masked**,
643
+ - 0 for tokens that are **masked**.
644
+
645
+ [What are attention masks?](../glossary#attention-mask)
646
+ output_attentions (`bool`, *optional*):
647
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
648
+ returned tensors for more detail.
649
+ output_hidden_states (`bool`, *optional*):
650
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
651
+ for more detail.
652
+ return_dict (`bool`, *optional*):
653
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
654
+ """
655
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
656
+ output_hidden_states = (
657
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
658
+ )
659
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
660
+
661
+ encoder_states = () if output_hidden_states else None
662
+ all_attentions = () if output_attentions else None
663
+
664
+ hidden_states = inputs_embeds
665
+ for idx, encoder_layer in enumerate(self.layers):
666
+ if output_hidden_states:
667
+ encoder_states = encoder_states + (hidden_states,)
668
+ layer_outputs = encoder_layer(
669
+ hidden_states,
670
+ attention_mask,
671
+ output_attentions=output_attentions,
672
+ training=training,
673
+ )
674
+
675
+ hidden_states = layer_outputs[0]
676
+
677
+ if output_attentions:
678
+ all_attentions = all_attentions + (layer_outputs[1],)
679
+
680
+ if output_hidden_states:
681
+ encoder_states = encoder_states + (hidden_states,)
682
+
683
+ if not return_dict:
684
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
685
+ return TFBaseModelOutput(
686
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
687
+ )
688
+
689
+ def build(self, input_shape=None):
690
+ if self.built:
691
+ return
692
+ self.built = True
693
+ if getattr(self, "layers", None) is not None:
694
+ for layer in self.layers:
695
+ with tf.name_scope(layer.name):
696
+ layer.build(None)
697
+
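+ # With `output_hidden_states=True`, the loop in `TFBlipEncoder.call` above records the hidden state
+ # before every layer and once more after the last one, so `hidden_states` ends up with
+ # `config.num_hidden_layers + 1` entries, the first being the embedding output fed into the encoder.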
698
+
699
+ class TFBlipVisionModel(TFBlipPreTrainedModel):
700
+ main_input_name = "pixel_values"
701
+ config_class = BlipVisionConfig
702
+
703
+ def __init__(self, config: BlipVisionConfig, *args, **kwargs):
704
+ super().__init__(config, *args, **kwargs)
705
+ self.config = config
706
+
707
+ self.embeddings = TFBlipVisionEmbeddings(config, name="embeddings")
708
+ self.encoder = TFBlipEncoder(config, name="encoder")
709
+ self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm")
710
+ self.embed_dim = config.hidden_size
711
+
712
+ def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
713
+ hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
714
+ attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
715
+
716
+ return TFBaseModelOutputWithPooling(
717
+ last_hidden_state=output.last_hidden_state,
718
+ pooler_output=output.pooler_output,
719
+ hidden_states=hs,
720
+ attentions=attns,
721
+ )
722
+
723
+ @unpack_inputs
724
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
725
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=BlipVisionConfig)
726
+ def call(
727
+ self,
728
+ pixel_values: tf.Tensor | None = None,
729
+ output_attentions: Optional[bool] = None,
730
+ output_hidden_states: Optional[bool] = None,
731
+ return_dict: Optional[bool] = None,
732
+ training: Optional[bool] = None,
733
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
734
+ r"""
735
+ Returns:
736
+
737
+ """
738
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
739
+ output_hidden_states = (
740
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
741
+ )
742
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
743
+
744
+ if pixel_values is None:
745
+ raise ValueError("You have to specify pixel_values")
746
+
747
+ hidden_states = self.embeddings(pixel_values)
748
+
749
+ encoder_outputs = self.encoder(
750
+ inputs_embeds=hidden_states,
751
+ output_attentions=output_attentions,
752
+ output_hidden_states=output_hidden_states,
753
+ return_dict=return_dict,
754
+ training=training,
755
+ )
756
+
757
+ last_hidden_state = encoder_outputs[0]
758
+ last_hidden_state = self.post_layernorm(last_hidden_state)
759
+
760
+ pooled_output = last_hidden_state[:, 0, :]
761
+ # TF gets confused if we call the layer with inputs of different ranks, so insert a singleton dimension
762
+ pooled_output = self.post_layernorm(tf.expand_dims(pooled_output, 1))
763
+ pooled_output = tf.squeeze(pooled_output, 1)
764
+
765
+ if not return_dict:
766
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
767
+
768
+ return TFBaseModelOutputWithPooling(
769
+ last_hidden_state=last_hidden_state,
770
+ pooler_output=pooled_output,
771
+ hidden_states=encoder_outputs.hidden_states,
772
+ attentions=encoder_outputs.attentions,
773
+ )
774
+
775
+ def get_input_embeddings(self):
776
+ return self.embeddings
777
+
778
+ def build(self, input_shape=None):
779
+ if self.built:
780
+ return
781
+ self.built = True
782
+ if getattr(self, "embeddings", None) is not None:
783
+ with tf.name_scope(self.embeddings.name):
784
+ self.embeddings.build(None)
785
+ if getattr(self, "encoder", None) is not None:
786
+ with tf.name_scope(self.encoder.name):
787
+ self.encoder.build(None)
788
+ if getattr(self, "post_layernorm", None) is not None:
789
+ with tf.name_scope(self.post_layernorm.name):
790
+ self.post_layernorm.build([None, None, self.embed_dim])
791
+
792
+
793
+ class TFBlipMainLayer(keras.layers.Layer):
794
+ config_class = BlipConfig
795
+
796
+ def __init__(self, config: BlipConfig, *args, **kwargs):
797
+ super().__init__(*args, **kwargs)
798
+
799
+ if not isinstance(config.text_config, BlipTextConfig):
800
+ raise ValueError(
801
+ "config.text_config is expected to be of type BlipTextConfig but is of type"
802
+ f" {type(config.text_config)}."
803
+ )
804
+
805
+ if not isinstance(config.vision_config, BlipVisionConfig):
806
+ raise ValueError(
807
+ "config.vision_config is expected to be of type BlipVisionConfig but is of type"
808
+ f" {type(config.vision_config)}."
809
+ )
810
+
811
+ text_config = config.text_config
812
+ vision_config = config.vision_config
813
+
814
+ self.projection_dim = config.projection_dim
815
+ self.text_embed_dim = text_config.hidden_size
816
+ self.vision_embed_dim = vision_config.hidden_size
817
+
818
+ self.text_model = TFBlipTextModel(text_config, name="text_model")
819
+ self.vision_model = TFBlipVisionModel(vision_config, name="vision_model")
820
+
821
+ self.visual_projection = keras.layers.Dense(
822
+ self.projection_dim,
823
+ use_bias=False,
824
+ kernel_initializer=get_initializer(config.initializer_range),
825
+ name="visual_projection",
826
+ )
827
+ self.text_projection = keras.layers.Dense(
828
+ self.projection_dim,
829
+ use_bias=False,
830
+ kernel_initializer=get_initializer(config.initializer_range),
831
+ name="text_projection",
832
+ )
833
+
834
+ self.config = config
835
+
836
+ def build(self, input_shape=None):
837
+ self.logit_scale = self.add_weight(
838
+ name="logit_scale",
839
+ shape=[],
840
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
841
+ trainable=True,
842
+ )
843
+
844
+ if self.built:
845
+ return
846
+ self.built = True
847
+ if getattr(self, "text_model", None) is not None:
848
+ with tf.name_scope(self.text_model.name):
849
+ self.text_model.build(None)
850
+ if getattr(self, "vision_model", None) is not None:
851
+ with tf.name_scope(self.vision_model.name):
852
+ self.vision_model.build(None)
853
+ if getattr(self, "visual_projection", None) is not None:
854
+ with tf.name_scope(self.visual_projection.name):
855
+ self.visual_projection.build([None, None, self.vision_embed_dim])
856
+ if getattr(self, "text_projection", None) is not None:
857
+ with tf.name_scope(self.text_projection.name):
858
+ self.text_projection.build([None, None, self.text_embed_dim])
859
+
860
+ @unpack_inputs
861
+ def call(
862
+ self,
863
+ input_ids: tf.Tensor | None = None,
864
+ pixel_values: tf.Tensor | None = None,
865
+ attention_mask: tf.Tensor | None = None,
866
+ position_ids: tf.Tensor | None = None,
867
+ return_loss: Optional[bool] = None,
868
+ output_attentions: Optional[bool] = None,
869
+ output_hidden_states: Optional[bool] = None,
870
+ return_dict: Optional[bool] = None,
871
+ training: Optional[bool] = None,
872
+ ) -> Union[Tuple, TFBlipOutput]:
873
+ # Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
874
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
875
+ output_hidden_states = (
876
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
877
+ )
878
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
879
+
880
+ vision_outputs = self.vision_model(
881
+ pixel_values=pixel_values,
882
+ output_attentions=output_attentions,
883
+ output_hidden_states=output_hidden_states,
884
+ return_dict=return_dict,
885
+ training=training,
886
+ )
887
+
888
+ text_outputs = self.text_model(
889
+ input_ids=input_ids,
890
+ attention_mask=attention_mask,
891
+ position_ids=position_ids,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ training=training,
896
+ )
897
+
898
+ image_embeds = vision_outputs[1]
899
+ image_embeds = self.visual_projection(image_embeds)
900
+
901
+ text_embeds = text_outputs[1]
902
+ text_embeds = self.text_projection(text_embeds)
903
+
904
+ # normalized features
905
+ image_embeds = image_embeds / tf.norm(image_embeds, ord=2, axis=-1, keepdims=True)
906
+ text_embeds = text_embeds / tf.norm(text_embeds, ord=2, axis=-1, keepdims=True)
907
+
908
+ # cosine similarity as logits
909
+ logit_scale = tf.exp(self.logit_scale)
910
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
911
+ logits_per_image = tf.transpose(logits_per_text)
912
+
913
+ loss = None
914
+ if return_loss:
915
+ loss = blip_loss(logits_per_text)
916
+ loss = tf.reshape(loss, (1,))
917
+
918
+ if not return_dict:
919
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
920
+ return ((loss,) + output) if loss is not None else output
921
+
922
+ return TFBlipOutput(
923
+ loss=loss,
924
+ logits_per_image=logits_per_image,
925
+ logits_per_text=logits_per_text,
926
+ text_embeds=text_embeds,
927
+ image_embeds=image_embeds,
928
+ text_model_output=text_outputs,
929
+ vision_model_output=vision_outputs,
930
+ )
931
+
932
+
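+ # The similarity logits above follow the CLIP recipe: both embedding sets are L2-normalised, so the
+ # matmul is a cosine similarity, and `logit_scale` is stored in log space (hence `tf.exp`) acting as
+ # a learned temperature. For a hypothetical batch of 4 image-text pairs, `logits_per_text` has shape
+ # (4, 4) and `logits_per_image` is its transpose.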
933
+ class TFBlipModel(TFBlipPreTrainedModel):
934
+ config_class = BlipConfig
935
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
936
+ main_input_name = "input_ids"
937
+
938
+ def __init__(self, config: BlipConfig, *inputs, **kwargs):
939
+ super().__init__(config, *inputs, **kwargs)
940
+
941
+ self.blip = TFBlipMainLayer(config, name="blip")
942
+
943
+ def serving_output(self, output: TFBlipOutput) -> TFBlipOutput:
944
+ return TFBlipOutput(
945
+ logits_per_image=output.logits_per_image,
946
+ logits_per_text=output.logits_per_text,
947
+ text_embeds=output.text_embeds,
948
+ image_embeds=output.image_embeds,
949
+ )
950
+
951
+ @unpack_inputs
952
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
953
+ @replace_return_docstrings(output_type=TFBlipOutput, config_class=BlipConfig)
954
+ def call(
955
+ self,
956
+ input_ids: tf.Tensor | None = None,
957
+ pixel_values: tf.Tensor | None = None,
958
+ attention_mask: tf.Tensor | None = None,
959
+ position_ids: tf.Tensor | None = None,
960
+ return_loss: Optional[bool] = None,
961
+ output_attentions: Optional[bool] = None,
962
+ output_hidden_states: Optional[bool] = None,
963
+ return_dict: Optional[bool] = None,
964
+ training: Optional[bool] = None,
965
+ ) -> Union[Tuple, TFBlipOutput]:
966
+ r"""
967
+ Returns:
968
+
969
+ Examples:
970
+
971
+ ```python
972
+ >>> from PIL import Image
973
+ >>> import requests
974
+ >>> from transformers import AutoProcessor, TFBlipModel
975
+
976
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
977
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
978
+
979
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
980
+ >>> image = Image.open(requests.get(url, stream=True).raw)
981
+
982
+ >>> inputs = processor(
983
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
984
+ ... )
985
+
986
+ >>> outputs = model(**inputs)
987
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
988
+ >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
989
+ ```"""
990
+ outputs = self.blip(
991
+ input_ids=input_ids,
992
+ pixel_values=pixel_values,
993
+ attention_mask=attention_mask,
994
+ position_ids=position_ids,
995
+ return_loss=return_loss,
996
+ output_attentions=output_attentions,
997
+ output_hidden_states=output_hidden_states,
998
+ return_dict=return_dict,
999
+ training=training,
1000
+ )
1001
+ return outputs
1002
+
1003
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
1004
+ def get_text_features(
1005
+ self,
1006
+ input_ids: tf.Tensor | None = None,
1007
+ attention_mask: tf.Tensor | None = None,
1008
+ position_ids: tf.Tensor | None = None,
1009
+ return_dict: Optional[bool] = None,
1010
+ ) -> tf.Tensor:
1011
+ r"""
1012
+ Returns:
1013
+ text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
1014
+ the projection layer to the pooled output of [`TFBlipTextModel`].
1015
+
1016
+ Examples:
1017
+
1018
+ ```python
1019
+ >>> from transformers import AutoProcessor, TFBlipModel
1020
+
1021
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
1022
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1023
+
1024
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
1025
+ >>> text_features = model.get_text_features(**inputs)
1026
+ ```"""
1027
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1028
+
1029
+ text_outputs = self.blip.text_model(
1030
+ input_ids=input_ids,
1031
+ attention_mask=attention_mask,
1032
+ position_ids=position_ids,
1033
+ return_dict=return_dict,
1034
+ )
1035
+
1036
+ pooled_output = text_outputs[1]
1037
+ text_features = self.blip.text_projection(pooled_output)
1038
+
1039
+ return text_features
1040
+
1041
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1042
+ def get_image_features(
1043
+ self,
1044
+ pixel_values: tf.Tensor | None = None,
1045
+ return_dict: Optional[bool] = None,
1046
+ ) -> tf.Tensor:
1047
+ r"""
1048
+ Returns:
1049
+ image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
1050
+ the projection layer to the pooled output of [`TFBlipVisionModel`].
1051
+
1052
+ Examples:
1053
+
1054
+ ```python
1055
+ >>> from PIL import Image
1056
+ >>> import requests
1057
+ >>> from transformers import AutoProcessor, TFBlipModel
1058
+
1059
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
1060
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1061
+
1062
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1063
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1064
+
1065
+ >>> inputs = processor(images=image, return_tensors="tf")
1066
+
1067
+ >>> image_features = model.get_image_features(**inputs)
1068
+ ```"""
1069
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1070
+
1071
+ vision_outputs = self.blip.vision_model(pixel_values=pixel_values, return_dict=return_dict)
1072
+
1073
+ pooled_output = vision_outputs[1] # pooled_output
1074
+ image_features = self.blip.visual_projection(pooled_output)
1075
+
1076
+ return image_features
1077
+
1078
+ def build(self, input_shape=None):
1079
+ if self.built:
1080
+ return
1081
+ self.built = True
1082
+ if getattr(self, "blip", None) is not None:
1083
+ with tf.name_scope(self.blip.name):
1084
+ self.blip.build(None)
1085
+
1086
+
1087
+ @add_start_docstrings(
1088
+ """
1089
+ BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
1090
+ `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt and generate
1091
+ the caption conditioned on the text input. If no text input is provided, the decoder starts generating from the
1092
+ [BOS] (beginning-of-sequence) token only.
1093
+ """,
1094
+ BLIP_START_DOCSTRING,
1095
+ )
1096
+ class TFBlipForConditionalGeneration(TFBlipPreTrainedModel):
1097
+ config_class = BlipConfig
1098
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
1099
+ main_input_name = "pixel_values"
1100
+
1101
+ def __init__(self, config: BlipConfig, *args, **kwargs):
1102
+ super().__init__(config, *args, **kwargs)
1103
+
1104
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
1105
+
1106
+ self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder")
1107
+
1108
+ self.decoder_input_ids = config.text_config.bos_token_id
1109
+ self.decoder_pad_token_id = config.text_config.pad_token_id
1110
+
1111
+ def get_input_embeddings(self) -> keras.layers.Layer:
1112
+ return self.vision_model.embeddings.patch_embedding
1113
+
1114
+ @unpack_inputs
1115
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1116
+ @replace_return_docstrings(output_type=TFBlipForConditionalGenerationModelOutput, config_class=BlipConfig)
1117
+ def call(
1118
+ self,
1119
+ pixel_values: tf.Tensor,
1120
+ input_ids: tf.Tensor | None = None,
1121
+ attention_mask: tf.Tensor | None = None,
1122
+ output_attentions: Optional[bool] = None,
1123
+ output_hidden_states: Optional[bool] = None,
1124
+ labels: tf.Tensor | None = None,
1125
+ return_dict: Optional[bool] = None,
1126
+ training: Optional[bool] = None,
1127
+ ) -> Union[Tuple, TFBlipForConditionalGenerationModelOutput]:
1128
+ r"""
1129
+ Returns:
1130
+
1131
+ Examples:
1132
+
1133
+ ```python
1134
+ >>> from PIL import Image
1135
+ >>> import requests
1136
+ >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration
1137
+
1138
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1139
+ >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
1140
+
1141
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1142
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1143
+ >>> text = "A picture of"
1144
+
1145
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1146
+
1147
+ >>> outputs = model(**inputs)
1148
+ ```"""
1149
+
1150
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1151
+ vision_outputs = self.vision_model(
1152
+ pixel_values=pixel_values,
1153
+ output_attentions=output_attentions,
1154
+ output_hidden_states=output_hidden_states,
1155
+ return_dict=return_dict,
1156
+ training=training,
1157
+ )
1158
+
1159
+ image_embeds = vision_outputs[0]
1160
+
1161
+ outputs = self.text_decoder(
1162
+ input_ids=input_ids,
1163
+ attention_mask=attention_mask,
1164
+ encoder_hidden_states=image_embeds,
1165
+ labels=labels,
1166
+ return_dict=False,
1167
+ training=training,
1168
+ )
1169
+
1170
+ if not return_dict:
1171
+ outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]
1172
+ return tuple(output for output in outputs if output is not None)
1173
+
1174
+ if labels is not None:
1175
+ loss = outputs[0]
1176
+ logits = outputs[1]
1177
+ else:
1178
+ loss = None
1179
+ logits = outputs[0]
1180
+
1181
+ if loss is not None and loss.shape.rank == 0:
1182
+ loss = tf.reshape(loss, (1,))
1183
+
1184
+ return TFBlipForConditionalGenerationModelOutput(
1185
+ loss=loss,
1186
+ logits=logits,
1187
+ image_embeds=image_embeds,
1188
+ last_hidden_state=vision_outputs.last_hidden_state,
1189
+ hidden_states=vision_outputs.hidden_states,
1190
+ attentions=vision_outputs.attentions,
1191
+ )
1192
+
1193
+ def generate(
1194
+ self,
1195
+ pixel_values: tf.Tensor,
1196
+ input_ids: tf.Tensor | None = None,
1197
+ attention_mask: tf.Tensor | None = None,
1198
+ **generate_kwargs,
1199
+ ) -> tf.Tensor:
1200
+ r"""
1201
+ Overrides the *generate* function to be able to use the model as a conditional generator.
1202
+
1203
+ Parameters:
1204
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`):
1205
+ Input image to be processed
1206
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1207
+ The sequence used as a prompt for the generation.
1208
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1209
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1210
+
1211
+
1212
+ Examples:
1213
+ ```python
1214
+ >>> from PIL import Image
1215
+ >>> import requests
1216
+ >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration
1217
+
1218
+ >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
1219
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1220
+
1221
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1222
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1223
+
1224
+ >>> inputs = processor(images=image, return_tensors="tf")
1225
+
1226
+ >>> outputs = model.generate(**inputs)
1227
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1228
+ two cats sleeping on a couch
1229
+ ```
1230
+ """
1231
+
1232
+ batch_size = pixel_values.shape[0]
1233
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1234
+
1235
+ image_embeds = vision_outputs[0]
1236
+
1237
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32)
1238
+
1239
+ if isinstance(input_ids, list):
1240
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32)
1241
+ elif input_ids is None:
1242
+ input_ids = tf.convert_to_tensor(
1243
+ [[self.decoder_input_ids, self.config.text_config.eos_token_id]], dtype=tf.int32
1244
+ )
1245
+
1246
+ input_ids = tf.tile(input_ids, (batch_size, 1))
1247
+
1248
+ # PyTorch: input_ids[:, 0] = self.config.text_config.bos_token_id
1249
+ input_ids = tf.concat(
1250
+ [tf.ones((batch_size, 1), dtype=tf.int32) * self.config.text_config.bos_token_id, input_ids[:, 1:]], axis=1
1251
+ )
1252
+ attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
1253
+
1254
+ outputs = self.text_decoder.generate(
1255
+ input_ids=input_ids[:, :-1],
1256
+ eos_token_id=self.config.text_config.sep_token_id,
1257
+ pad_token_id=self.config.text_config.pad_token_id,
1258
+ attention_mask=attention_mask,
1259
+ encoder_hidden_states=image_embeds,
1260
+ encoder_attention_mask=image_attention_mask,
1261
+ **generate_kwargs,
1262
+ )
1263
+
1264
+ return outputs
1265
+
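+ # In `generate` above, the image is encoded once and passed to the text decoder via
+ # `encoder_hidden_states`. The prompt's final token (typically the tokenizer-appended [SEP]) is
+ # dropped with `input_ids[:, :-1]`, the first position is forced to [BOS], and decoding stops at the
+ # SEP id, which is reused as the EOS token here.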
1266
+ def build(self, input_shape=None):
1267
+ if self.built:
1268
+ return
1269
+ self.built = True
1270
+ if getattr(self, "vision_model", None) is not None:
1271
+ with tf.name_scope(self.vision_model.name):
1272
+ self.vision_model.build(None)
1273
+ if getattr(self, "text_decoder", None) is not None:
1274
+ with tf.name_scope(self.text_decoder.name):
1275
+ self.text_decoder.build(None)
1276
+
1277
+
1278
+ @add_start_docstrings(
1279
+ """
1280
+ BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
1281
+ decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
1282
+ with the encoding of the image, and the text decoder will output the answer to the question.
1283
+ """,
1284
+ BLIP_START_DOCSTRING,
1285
+ )
1286
+ class TFBlipForQuestionAnswering(TFBlipPreTrainedModel):
1287
+ config_class = BlipConfig
1288
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
1289
+
1290
+ def __init__(self, config: BlipConfig, *args, **kwargs):
1291
+ super().__init__(config, *args, **kwargs)
1292
+
1293
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
1294
+
1295
+ self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False)
1296
+
1297
+ self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder")
1298
+
1299
+ self.decoder_pad_token_id = config.text_config.pad_token_id
1300
+ self.decoder_start_token_id = config.text_config.bos_token_id
1301
+
1302
+ def get_input_embeddings(self) -> keras.layers.Layer:
1303
+ return self.vision_model.embeddings.patch_embedding
1304
+
1305
+ # Adapted from transformers.models.t5.modeling_tf_t5.TFT5PreTrainedModel._shift_right
1306
+ def _shift_right(self, input_ids):
1307
+ decoder_start_token_id = self.decoder_start_token_id
1308
+ pad_token_id = self.decoder_pad_token_id
1309
+
1310
+ if decoder_start_token_id is None or pad_token_id is None:
1311
+ raise ValueError("decoder_start_token_id and pad_token_id must be defined!")
1312
+
1313
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
1314
+ start_tokens = tf.cast(start_tokens, input_ids.dtype) # Ensure compatible dtypes for concatenation
1315
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
1316
+
1317
+ # replace possible -100 values in labels by `pad_token_id`
1318
+ shifted_input_ids = tf.where(
1319
+ shifted_input_ids == -100,
1320
+ tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype),
1321
+ shifted_input_ids,
1322
+ )
1323
+
1324
+ # "Verify that `labels` has only positive values and -100"
1325
+ tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype))
1326
+
1327
+ return shifted_input_ids
1328
+
1329
+ @unpack_inputs
1330
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1331
+ @replace_return_docstrings(output_type=TFBlipTextVisionModelOutput, config_class=BlipVisionConfig)
1332
+ def call(
1333
+ self,
1334
+ input_ids: tf.Tensor,
1335
+ pixel_values: tf.Tensor | None = None,
1336
+ decoder_input_ids: tf.Tensor | None = None,
1337
+ decoder_attention_mask: tf.Tensor | None = None,
1338
+ attention_mask: tf.Tensor | None = None,
1339
+ output_attentions: Optional[bool] = None,
1340
+ output_hidden_states: Optional[bool] = None,
1341
+ labels: tf.Tensor | None = None,
1342
+ return_dict: Optional[bool] = None,
1343
+ training: Optional[bool] = None,
1344
+ ) -> Union[Tuple, TFBlipTextVisionModelOutput]:
1345
+ r"""
1346
+ Returns:
1347
+
1348
+ Examples:
1349
+
1350
+ ```python
1351
+ >>> from PIL import Image
1352
+ >>> import requests
1353
+ >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering
1354
+
1355
+ >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1356
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1357
+
1358
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1359
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1360
+
1361
+ >>> # training
1362
+ >>> text = "How many cats are in the picture?"
1363
+ >>> label = "2"
1364
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1365
+ >>> labels = processor(text=label, return_tensors="tf").input_ids
1366
+
1367
+ >>> inputs["labels"] = labels
1368
+ >>> outputs = model(**inputs)
1369
+ >>> loss = outputs.loss
1370
+
1371
+ >>> # inference
1372
+ >>> text = "How many cats are in the picture?"
1373
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1374
+ >>> outputs = model.generate(**inputs)
1375
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1376
+ 2
1377
+ ```"""
1378
+ if labels is None and decoder_input_ids is None:
1379
+ raise ValueError(
1380
+ "Either `decoder_input_ids` or `labels` should be passed when calling"
1381
+ " `TFBlipForQuestionAnswering`. If you are training the model, make sure that `labels` is passed; if you"
1382
+ " are using the model for inference, make sure that `decoder_input_ids` is passed or call `generate`"
1383
+ )
1384
+
1385
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1386
+
1387
+ vision_outputs = self.vision_model(
1388
+ pixel_values=pixel_values,
1389
+ output_attentions=output_attentions,
1390
+ output_hidden_states=output_hidden_states,
1391
+ return_dict=return_dict,
1392
+ training=training,
1393
+ )
1394
+
1395
+ image_embeds = vision_outputs[0]
1396
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64)
1397
+
1398
+ question_embeds = self.text_encoder(
1399
+ input_ids=input_ids,
1400
+ attention_mask=attention_mask,
1401
+ encoder_hidden_states=image_embeds,
1402
+ encoder_attention_mask=image_attention_mask,
1403
+ return_dict=return_dict,
1404
+ training=training,
1405
+ )
1406
+
1407
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1408
+
1409
+ if labels is not None and decoder_input_ids is None:
1410
+ # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153
1411
+ decoder_input_ids = labels
1412
+
1413
+ answer_output = self.text_decoder(
1414
+ input_ids=decoder_input_ids,
1415
+ attention_mask=decoder_attention_mask,
1416
+ encoder_hidden_states=question_embeds,
1417
+ encoder_attention_mask=attention_mask,
1418
+ labels=labels,
1419
+ return_dict=return_dict,
1420
+ training=training,
1421
+ )
1422
+
1423
+ if labels is not None:
1424
+ decoder_loss = tf.reduce_mean(answer_output.loss) if return_dict else tf.reduce_mean(answer_output[0])
1425
+ else:
1426
+ decoder_loss = None
1427
+
1428
+ if not return_dict:
1429
+ outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
1430
+ return tuple(output for output in outputs if output is not None)
1431
+
1432
+ return TFBlipTextVisionModelOutput(
1433
+ loss=decoder_loss,
1434
+ image_embeds=image_embeds,
1435
+ last_hidden_state=vision_outputs.last_hidden_state,
1436
+ hidden_states=vision_outputs.hidden_states,
1437
+ attentions=vision_outputs.attentions,
1438
+ )
1439
+
1440
+ def generate(
1441
+ self,
1442
+ input_ids: tf.Tensor,
1443
+ pixel_values: tf.Tensor,
1444
+ attention_mask: tf.Tensor | None = None,
1445
+ **generate_kwargs,
1446
+ ) -> tf.Tensor:
1447
+ r"""
1448
+ Overrides *generate* function to be able to use the model as a conditional generator
1449
+
1450
+ Parameters:
1451
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
1452
+ The sequence used as a prompt for the generation.
1453
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`):
1454
+ Input image to be processed
1455
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1456
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
1457
+ tokens that are NOT MASKED, `0` for MASKED tokens.
1458
+ generate_kwargs (dict, *optional*):
1459
+ Additional arguments passed to the `generate` function of the decoder
1460
+
1461
+
1462
+ Examples:
1463
+ ```python
1464
+ >>> from PIL import Image
1465
+ >>> import requests
1466
+ >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering
1467
+
1468
+ >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1469
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1470
+
1471
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1472
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1473
+ >>> text = "How many cats are in the picture?"
1474
+
1475
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1476
+
1477
+ >>> outputs = model.generate(**inputs)
1478
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1479
+ 2
1480
+ ```
1481
+ """
1482
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1483
+
1484
+ image_embeds = vision_outputs[0]
1485
+
1486
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32)
1487
+
1488
+ if isinstance(input_ids, list):
1489
+ input_ids = tf.convert_to_tensor(input_ids)
1490
+
1491
+ question_outputs = self.text_encoder(
1492
+ input_ids=input_ids,
1493
+ attention_mask=attention_mask,
1494
+ encoder_hidden_states=image_embeds,
1495
+ encoder_attention_mask=image_attention_mask,
1496
+ return_dict=False,
1497
+ )
1498
+
1499
+ question_embeds = question_outputs[0]
1500
+
1501
+ question_attention_mask = tf.ones(shape_list(question_embeds)[:-1], dtype=tf.int32)
1502
+
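+ # The answer decoder is seeded with a single BOS token per example and attends to the encoded question through cross-attention.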
1503
+ bos_ids = tf.fill(
1504
+ (tf.shape(question_embeds)[0], 1), value=tf.cast(self.decoder_start_token_id, input_ids.dtype)
1505
+ )
1506
+
1507
+ outputs = self.text_decoder.generate(
1508
+ input_ids=bos_ids,
1509
+ eos_token_id=self.config.text_config.sep_token_id,
1510
+ pad_token_id=self.config.text_config.pad_token_id,
1511
+ encoder_hidden_states=question_embeds,
1512
+ encoder_attention_mask=question_attention_mask,
1513
+ **generate_kwargs,
1514
+ )
1515
+
1516
+ return outputs
1517
+
1518
+ def build(self, input_shape=None):
1519
+ if self.built:
1520
+ return
1521
+ self.built = True
1522
+ if getattr(self, "vision_model", None) is not None:
1523
+ with tf.name_scope(self.vision_model.name):
1524
+ self.vision_model.build(None)
1525
+ if getattr(self, "text_encoder", None) is not None:
1526
+ with tf.name_scope(self.text_encoder.name):
1527
+ self.text_encoder.build(None)
1528
+ if getattr(self, "text_decoder", None) is not None:
1529
+ with tf.name_scope(self.text_decoder.name):
1530
+ self.text_decoder.build(None)
1531
+
1532
+
1533
+ @add_start_docstrings(
1534
+ """
1535
+ BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
1536
+ image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
1537
+ the image.
1538
+ """,
1539
+ BLIP_START_DOCSTRING,
1540
+ )
1541
+ class TFBlipForImageTextRetrieval(TFBlipPreTrainedModel):
1542
+ config_class = BlipConfig
1543
+
1544
+ def __init__(self, config: BlipConfig, *args, **kwargs):
1545
+ super().__init__(config, *args, **kwargs)
1546
+
1547
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
1548
+
1549
+ self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False)
1550
+
1551
+ # vision projection layer
1552
+ self.vision_proj = keras.layers.Dense(
1553
+ config.image_text_hidden_size,
1554
+ kernel_initializer=get_initializer(config.initializer_range),
1555
+ name="vision_proj",
1556
+ )
1557
+
1558
+ # text projection layer
1559
+ self.text_proj = keras.layers.Dense(
1560
+ config.image_text_hidden_size,
1561
+ kernel_initializer=get_initializer(config.initializer_range),
1562
+ name="text_proj",
1563
+ )
1564
+
1565
+ # image text matching head
1566
+ self.itm_head = keras.layers.Dense(
1567
+ 2, kernel_initializer=get_initializer(config.initializer_range), name="itm_head"
1568
+ )
1569
+
1570
+ self.decoder_pad_token_id = (
1571
+ config.text_config.pad_token_id
1572
+ if not hasattr(config, "decoder_pad_token_id")
1573
+ else config.decoder_pad_token_id
1574
+ )
1575
+ self.decoder_start_token_id = (
1576
+ config.text_config.bos_token_id
1577
+ if not hasattr(config, "decoder_start_token_id")
1578
+ else config.decoder_start_token_id
1579
+ )
1580
+ self.config = config
1581
+
1582
+ def get_input_embeddings(self) -> keras.layers.Layer:
1583
+ return self.vision_model.embeddings.patch_embedding
1584
+
1585
+ @unpack_inputs
1586
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1587
+ @replace_return_docstrings(output_type=TFBlipImageTextMatchingModelOutput, config_class=BlipVisionConfig)
1588
+ def call(
1589
+ self,
1590
+ input_ids: tf.Tensor,
1591
+ pixel_values: tf.Tensor | None = None,
1592
+ use_itm_head: Optional[bool] = True,
1593
+ attention_mask: tf.Tensor | None = None,
1594
+ output_attentions: Optional[bool] = None,
1595
+ output_hidden_states: Optional[bool] = None,
1596
+ return_dict: Optional[bool] = None,
1597
+ training: Optional[bool] = None,
1598
+ ) -> Union[Tuple, TFBlipImageTextMatchingModelOutput]:
1599
+ r"""
1600
+ Returns:
1601
+
1602
+ Examples:
1603
+
1604
+ ```python
1605
+ >>> from PIL import Image
1606
+ >>> import requests
1607
+ >>> from transformers import AutoProcessor, TFBlipForImageTextRetrieval
1608
+
1609
+ >>> model = TFBlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
1610
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
1611
+
1612
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1613
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1614
+ >>> text = "an image of a cat"
1615
+
1616
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1617
+ >>> outputs = model(**inputs)
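+ >>> # Hypothetical follow-up (not part of the original example): softmax over the two ITM logits;
+ >>> # by BLIP convention index 1 is taken as the "matched" class.
+ >>> import tensorflow as tf
+ >>> match_probability = tf.nn.softmax(outputs.itm_score, axis=-1)[..., 1]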
1618
+ ```
1619
+ """
1620
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1621
+
1622
+ vision_outputs = self.vision_model(
1623
+ pixel_values=pixel_values,
1624
+ output_attentions=output_attentions,
1625
+ output_hidden_states=output_hidden_states,
1626
+ return_dict=return_dict,
1627
+ training=training,
1628
+ )
1629
+
1630
+ image_embeds = vision_outputs[0]
1631
+ image_atts = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64)
1632
+
1633
+ # Matt: In PyTorch, only one path (itm/non-itm) is taken. However, in TensorFlow this can result in
1634
+ # some layers not being built! To avoid this, we always call both paths, then use an if statement to select
1635
+ # which output to pass to the final output. The unnecessary nodes will be pruned from the final graph, but
1636
+ # not before the layers have all been built correctly.
1637
+ itm_question_embeds = self.text_encoder(
1638
+ input_ids=input_ids,
1639
+ attention_mask=attention_mask,
1640
+ encoder_hidden_states=image_embeds,
1641
+ encoder_attention_mask=image_atts,
1642
+ return_dict=return_dict,
1643
+ training=training,
1644
+ )
1645
+ itm_question_embeds = itm_question_embeds[0] if not return_dict else itm_question_embeds.last_hidden_state
1646
+
1647
+ itm_output = self.itm_head(itm_question_embeds[:, 0, :])
1648
+
1649
+ no_itm_question_embeds = self.text_encoder(
1650
+ input_ids=input_ids,
1651
+ attention_mask=attention_mask,
1652
+ return_dict=return_dict,
1653
+ training=training,
1654
+ )
1655
+ no_itm_question_embeds = (
1656
+ no_itm_question_embeds[0] if not return_dict else no_itm_question_embeds.last_hidden_state
1657
+ )
1658
+
1659
+ image_feat, _ = tf.linalg.normalize(self.vision_proj(image_embeds[:, 0, :]), ord=2, axis=-1)
1660
+ text_feat, _ = tf.linalg.normalize(self.text_proj(no_itm_question_embeds[:, 0, :]), ord=2, axis=-1)
1661
+
1662
+ no_itm_output = tf.matmul(image_feat, text_feat, transpose_b=True)
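+ # image_feat and text_feat are L2-normalized above, so this produces a (batch, batch) matrix of image-text cosine similarities.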
1663
+
1664
+ if use_itm_head:
1665
+ output = itm_output
1666
+ question_embeds = itm_question_embeds
1667
+ else:
1668
+ output = no_itm_output
1669
+ question_embeds = no_itm_question_embeds
1670
+
1671
+ if not return_dict:
1672
+ outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
1673
+ return tuple(output for output in outputs if output is not None)
1674
+
1675
+ return TFBlipImageTextMatchingModelOutput(
1676
+ itm_score=output,
1677
+ last_hidden_state=vision_outputs.last_hidden_state,
1678
+ hidden_states=vision_outputs.hidden_states,
1679
+ attentions=vision_outputs.attentions,
1680
+ question_embeds=question_embeds,
1681
+ )
1682
+
1683
+ def build(self, input_shape=None):
1684
+ if self.built:
1685
+ return
1686
+ self.built = True
1687
+ if getattr(self, "vision_model", None) is not None:
1688
+ with tf.name_scope(self.vision_model.name):
1689
+ self.vision_model.build(None)
1690
+ if getattr(self, "text_encoder", None) is not None:
1691
+ with tf.name_scope(self.text_encoder.name):
1692
+ self.text_encoder.build(None)
1693
+ if getattr(self, "vision_proj", None) is not None:
1694
+ with tf.name_scope(self.vision_proj.name):
1695
+ self.vision_proj.build([None, None, self.config.vision_config.hidden_size])
1696
+ if getattr(self, "text_proj", None) is not None:
1697
+ with tf.name_scope(self.text_proj.name):
1698
+ self.text_proj.build([None, None, self.config.text_config.hidden_size])
1699
+ if getattr(self, "itm_head", None) is not None:
1700
+ with tf.name_scope(self.itm_head.name):
1701
+ self.itm_head.build([None, None, self.config.text_config.hidden_size])
venv/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip_text.py ADDED
@@ -0,0 +1,1122 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the BSD-3-clause license (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # https://opensource.org/licenses/BSD-3-Clause
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from __future__ import annotations
18
+
19
+ import math
20
+ from typing import Optional, Tuple
21
+
22
+ import tensorflow as tf
23
+
24
+ from ...modeling_tf_outputs import (
25
+ TFBaseModelOutputWithPastAndCrossAttentions,
26
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
27
+ TFCausalLMOutputWithCrossAttentions,
28
+ )
29
+ from ...modeling_tf_utils import (
30
+ TFModelInputType,
31
+ TFPreTrainedModel,
32
+ get_initializer,
33
+ get_tf_activation,
34
+ keras,
35
+ keras_serializable,
36
+ shape_list,
37
+ unpack_inputs,
38
+ )
39
+ from ...tf_utils import check_embeddings_within_bounds, invert_attention_mask, stable_softmax
40
+ from ...utils import add_start_docstrings_to_model_forward, logging
41
+ from .configuration_blip import BlipTextConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ BLIP_TEXT_INPUTS_DOCSTRING = r"""
47
+ Args:
48
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
49
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
50
+ it.
51
+
52
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
53
+
54
+ [What are input IDs?](../glossary#input-ids)
55
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
56
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
57
+
58
+ - 1 for tokens that are **not masked**,
59
+ - 0 for tokens that are **masked**.
60
+
61
+ [What are attention masks?](../glossary#attention-mask)
62
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
63
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
64
+ config.max_position_embeddings - 1]`.
65
+
66
+ [What are position IDs?](../glossary#position-ids)
67
+ output_attentions (`bool`, *optional*):
68
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
69
+ tensors for more detail.
70
+ output_hidden_states (`bool`, *optional*):
71
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
72
+ more detail.
73
+ return_dict (`bool`, *optional*):
74
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
75
+ """
76
+
77
+
78
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L52
79
+ class TFBlipTextEmbeddings(keras.layers.Layer):
80
+ """Construct the embeddings from word and position embeddings."""
81
+
82
+ def __init__(self, config, **kwargs):
83
+ super().__init__(**kwargs)
84
+ self.word_embeddings = keras.layers.Embedding(
85
+ config.vocab_size,
86
+ config.hidden_size,
87
+ embeddings_initializer=get_initializer(config.initializer_range),
88
+ name="word_embeddings",
89
+ )
90
+ self.position_embeddings = keras.layers.Embedding(
91
+ config.max_position_embeddings,
92
+ config.hidden_size,
93
+ embeddings_initializer=get_initializer(config.initializer_range),
94
+ name="position_embeddings",
95
+ )
96
+
97
+ # self.LayerNorm is not snake-cased to stick with PyTorch model variable name and be able to load
98
+ # any TensorFlow checkpoint file
99
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
100
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
101
+
102
+ self.position_ids = tf.expand_dims(tf.range(config.max_position_embeddings), 0)
103
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
104
+
105
+ self.config = config
106
+
107
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, training=None):
108
+ if input_ids is not None:
109
+ input_shape = tf.shape(input_ids)
110
+ else:
111
+ input_shape = tf.shape(inputs_embeds)[:-1]
112
+
113
+ seq_length = input_shape[1]
114
+
115
+ if position_ids is None:
116
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
117
+
118
+ if inputs_embeds is None:
119
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
120
+ inputs_embeds = self.word_embeddings(input_ids)
121
+
122
+ embeddings = inputs_embeds
123
+
124
+ if self.position_embedding_type == "absolute":
125
+ position_embeddings = self.position_embeddings(position_ids)
126
+ embeddings += position_embeddings
127
+ embeddings = self.LayerNorm(embeddings)
128
+ embeddings = self.dropout(embeddings, training=training)
129
+ return embeddings
130
+
131
+ def build(self, input_shape=None):
132
+ if self.built:
133
+ return
134
+ self.built = True
135
+ if getattr(self, "word_embeddings", None) is not None:
136
+ with tf.name_scope(self.word_embeddings.name):
137
+ self.word_embeddings.build(None)
138
+ if getattr(self, "position_embeddings", None) is not None:
139
+ with tf.name_scope(self.position_embeddings.name):
140
+ self.position_embeddings.build(None)
141
+ if getattr(self, "LayerNorm", None) is not None:
142
+ with tf.name_scope(self.LayerNorm.name):
143
+ self.LayerNorm.build([None, None, self.config.hidden_size])
144
+ if getattr(self, "dropout", None) is not None:
145
+ with tf.name_scope(self.dropout.name):
146
+ self.dropout.build(None)
147
+
148
+
149
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L97
150
+ class TFBlipTextSelfAttention(keras.layers.Layer):
151
+ def __init__(self, config, is_cross_attention, **kwargs):
152
+ super().__init__(**kwargs)
153
+ self.config = config
154
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
155
+ raise ValueError(
156
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
157
+ % (config.hidden_size, config.num_attention_heads)
158
+ )
159
+
160
+ self.num_attention_heads = config.num_attention_heads
161
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
162
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
163
+
164
+ self.query = keras.layers.Dense(
165
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
166
+ )
167
+ self.key = keras.layers.Dense(
168
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
169
+ )
170
+ self.value = keras.layers.Dense(
171
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
172
+ )
173
+
174
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
175
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
176
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
177
+ self.max_position_embeddings = config.max_position_embeddings
178
+ self.distance_embedding = keras.layers.Embedding(
179
+ 2 * config.max_position_embeddings - 1, self.attention_head_size
180
+ )
181
+ self.is_cross_attention = is_cross_attention
182
+
183
+ def transpose_for_scores(self, x):
184
+ new_x_shape = tf.concat(
185
+ [tf.shape(x)[:-1], tf.constant([self.num_attention_heads, self.attention_head_size], dtype=tf.int32)],
186
+ axis=0,
187
+ )
188
+ x = tf.reshape(x, new_x_shape)
189
+ return tf.transpose(x, perm=(0, 2, 1, 3))
190
+
191
+ def call(
192
+ self,
193
+ hidden_states,
194
+ attention_mask=None,
195
+ head_mask=None,
196
+ encoder_hidden_states=None,
197
+ encoder_attention_mask=None,
198
+ past_key_value=None,
199
+ output_attentions=False,
200
+ training=None,
201
+ ):
202
+ mixed_query_layer = self.query(hidden_states)
203
+
204
+ # If this is instantiated as a cross-attention module, the keys
205
+ # and values come from an encoder; the attention mask needs to be
206
+ # such that the encoder's padding tokens are not attended to.
207
+ is_cross_attention = encoder_hidden_states is not None
208
+
209
+ if is_cross_attention:
210
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
211
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
212
+ attention_mask = encoder_attention_mask
213
+ elif past_key_value is not None:
214
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
215
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
216
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
217
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
218
+ else:
219
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
220
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
221
+
222
+ query_layer = self.transpose_for_scores(mixed_query_layer)
223
+
224
+ past_key_value = (key_layer, value_layer)
225
+
226
+ # Take the dot product between "query" and "key" to get the raw attention scores.
227
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
228
+
229
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
230
+ seq_length = shape_list(hidden_states)[1]
231
+ position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 1)
232
+ position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0)
233
+ distance = position_ids_l - position_ids_r
234
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
235
+ positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility
236
+
237
+ if self.position_embedding_type == "relative_key":
238
+ relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
239
+ attention_scores = attention_scores + relative_position_scores
240
+ elif self.position_embedding_type == "relative_key_query":
241
+ relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
242
+ relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
243
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
244
+
245
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
246
+ if attention_mask is not None:
247
+ # Apply the attention mask is (precomputed for all layers in BlipTextModel forward() function)
248
+ attention_scores = attention_scores + tf.cast(attention_mask, attention_scores.dtype)
249
+
250
+ # Normalize the attention scores to probabilities.
251
+ attention_probs = stable_softmax(attention_scores, axis=-1)
252
+
253
+ # This is actually dropping out entire tokens to attend to, which might
254
+ # seem a bit unusual, but is taken from the original Transformer paper.
255
+ attention_probs_dropped = self.dropout(attention_probs, training=training)
256
+
257
+ # Mask heads if we want to
258
+ if head_mask is not None:
259
+ attention_probs_dropped = attention_probs_dropped * head_mask
260
+
261
+ context_layer = attention_probs_dropped @ value_layer
262
+
263
+ context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3))
264
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size]
265
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
266
+
267
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
268
+
269
+ outputs = outputs + (past_key_value,)
270
+ return outputs
271
+
272
+ def build(self, input_shape=None):
273
+ if self.built:
274
+ return
275
+ self.built = True
276
+ if getattr(self, "query", None) is not None:
277
+ with tf.name_scope(self.query.name):
278
+ self.query.build([None, None, self.config.hidden_size])
279
+ if self.is_cross_attention:
280
+ if getattr(self, "key", None) is not None:
281
+ with tf.name_scope(self.key.name):
282
+ self.key.build([None, None, self.config.encoder_hidden_size])
283
+ if getattr(self, "value", None) is not None:
284
+ with tf.name_scope(self.value.name):
285
+ self.value.build([None, None, self.config.encoder_hidden_size])
286
+ else:
287
+ if getattr(self, "key", None) is not None:
288
+ with tf.name_scope(self.key.name):
289
+ self.key.build([None, None, self.config.hidden_size])
290
+ if getattr(self, "value", None) is not None:
291
+ with tf.name_scope(self.value.name):
292
+ self.value.build([None, None, self.config.hidden_size])
293
+
294
+
295
+ class TFBlipTextSelfOutput(keras.layers.Layer):
296
+ def __init__(self, config: BlipTextConfig, **kwargs):
297
+ super().__init__(**kwargs)
298
+
299
+ self.dense = keras.layers.Dense(
300
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
301
+ )
302
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
303
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
304
+ self.config = config
305
+
306
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
307
+ hidden_states = self.dense(inputs=hidden_states)
308
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
309
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
310
+
311
+ return hidden_states
312
+
313
+ def build(self, input_shape=None):
314
+ if self.built:
315
+ return
316
+ self.built = True
317
+ if getattr(self, "dense", None) is not None:
318
+ with tf.name_scope(self.dense.name):
319
+ self.dense.build([None, None, self.config.hidden_size])
320
+ if getattr(self, "LayerNorm", None) is not None:
321
+ with tf.name_scope(self.LayerNorm.name):
322
+ self.LayerNorm.build([None, None, self.config.hidden_size])
323
+
324
+
325
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#242
326
+ class TFBlipTextAttention(keras.layers.Layer):
327
+ def __init__(self, config, is_cross_attention=False, **kwargs):
328
+ super().__init__(**kwargs)
329
+ self.self = TFBlipTextSelfAttention(config, is_cross_attention, name="self")
330
+ # "output" is a protected attribute on TF models
331
+ self.self_output = TFBlipTextSelfOutput(config, name="output")
332
+
333
+ def call(
334
+ self,
335
+ hidden_states: tf.Tensor,
336
+ attention_mask: tf.Tensor | None = None,
337
+ head_mask: tf.Tensor | None = None,
338
+ encoder_hidden_states: tf.Tensor | None = None,
339
+ encoder_attention_mask: tf.Tensor | None = None,
340
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
341
+ output_attentions: Optional[bool] = False,
342
+ training: Optional[bool] = None,
343
+ ):
344
+ self_outputs = self.self(
345
+ hidden_states,
346
+ attention_mask,
347
+ head_mask,
348
+ encoder_hidden_states,
349
+ encoder_attention_mask,
350
+ past_key_value,
351
+ output_attentions,
352
+ training=training,
353
+ )
354
+ attention_output = self.self_output(self_outputs[0], hidden_states, training=training)
355
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
356
+ return outputs
357
+
358
+ def build(self, input_shape=None):
359
+ if self.built:
360
+ return
361
+ self.built = True
362
+ if getattr(self, "self", None) is not None:
363
+ with tf.name_scope(self.self.name):
364
+ self.self.build(None)
365
+ if getattr(self, "self_output", None) is not None:
366
+ with tf.name_scope(self.self_output.name):
367
+ self.self_output.build(None)
368
+
369
+
370
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->BlipText
371
+ class TFBlipTextIntermediate(keras.layers.Layer):
372
+ def __init__(self, config: BlipTextConfig, **kwargs):
373
+ super().__init__(**kwargs)
374
+
375
+ self.dense = keras.layers.Dense(
376
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
377
+ )
378
+
379
+ if isinstance(config.hidden_act, str):
380
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
381
+ else:
382
+ self.intermediate_act_fn = config.hidden_act
383
+ self.config = config
384
+
385
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
386
+ hidden_states = self.dense(inputs=hidden_states)
387
+ hidden_states = self.intermediate_act_fn(hidden_states)
388
+
389
+ return hidden_states
390
+
391
+ def build(self, input_shape=None):
392
+ if self.built:
393
+ return
394
+ self.built = True
395
+ if getattr(self, "dense", None) is not None:
396
+ with tf.name_scope(self.dense.name):
397
+ self.dense.build([None, None, self.config.hidden_size])
398
+
399
+
400
+ class TFBlipTextOutput(keras.layers.Layer):
401
+ def __init__(self, config: BlipTextConfig, **kwargs):
402
+ super().__init__(**kwargs)
403
+
404
+ self.dense = keras.layers.Dense(
405
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
406
+ )
407
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
408
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
409
+ self.config = config
410
+
411
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
412
+ hidden_states = self.dense(inputs=hidden_states)
413
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
414
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
415
+
416
+ return hidden_states
417
+
418
+ def build(self, input_shape=None):
419
+ if self.built:
420
+ return
421
+ self.built = True
422
+ if getattr(self, "dense", None) is not None:
423
+ with tf.name_scope(self.dense.name):
424
+ self.dense.build([None, None, self.config.intermediate_size])
425
+ if getattr(self, "LayerNorm", None) is not None:
426
+ with tf.name_scope(self.LayerNorm.name):
427
+ self.LayerNorm.build([None, None, self.config.hidden_size])
428
+
429
+
430
+ class TFBlipTextLayer(keras.layers.Layer):
431
+ def __init__(self, config, **kwargs):
432
+ super().__init__(**kwargs)
433
+ self.config = config
434
+ self.attention = TFBlipTextAttention(config, name="attention")
435
+ if self.config.is_decoder:
436
+ self.crossattention = TFBlipTextAttention(
437
+ config, is_cross_attention=self.config.is_decoder, name="crossattention"
438
+ )
439
+ self.intermediate = TFBlipTextIntermediate(config, name="intermediate")
440
+ self.self_output = TFBlipTextOutput(config, name="output")
441
+
442
+ def call(
443
+ self,
444
+ hidden_states,
445
+ attention_mask=None,
446
+ head_mask=None,
447
+ encoder_hidden_states=None,
448
+ encoder_attention_mask=None,
449
+ past_key_value=None,
450
+ output_attentions=False,
451
+ training=None,
452
+ ):
453
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
454
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
455
+ self_attention_outputs = self.attention(
456
+ hidden_states,
457
+ attention_mask,
458
+ head_mask,
459
+ output_attentions=output_attentions,
460
+ past_key_value=self_attn_past_key_value,
461
+ training=training,
462
+ )
463
+ attention_output = self_attention_outputs[0]
464
+
465
+ outputs = self_attention_outputs[1:-1]
466
+ present_key_value = self_attention_outputs[-1]
467
+
468
+ if encoder_hidden_states is not None:
469
+ cross_attention_outputs = self.crossattention(
470
+ attention_output,
471
+ attention_mask,
472
+ head_mask,
473
+ encoder_hidden_states,
474
+ encoder_attention_mask,
475
+ output_attentions=output_attentions,
476
+ training=training,
477
+ )
478
+ attention_output = cross_attention_outputs[0]
479
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
480
+ intermediate_output = self.intermediate(attention_output)
481
+ layer_output = self.self_output(intermediate_output, attention_output, training=training)
482
+ outputs = (layer_output,) + outputs
483
+
484
+ outputs = outputs + (present_key_value,)
485
+
486
+ return outputs
487
+
488
+ def build(self, input_shape=None):
489
+ if self.built:
490
+ return
491
+ self.built = True
492
+ if getattr(self, "attention", None) is not None:
493
+ with tf.name_scope(self.attention.name):
494
+ self.attention.build(None)
495
+ if getattr(self, "intermediate", None) is not None:
496
+ with tf.name_scope(self.intermediate.name):
497
+ self.intermediate.build(None)
498
+ if getattr(self, "self_output", None) is not None:
499
+ with tf.name_scope(self.self_output.name):
500
+ self.self_output.build(None)
501
+ if getattr(self, "crossattention", None) is not None:
502
+ with tf.name_scope(self.crossattention.name):
503
+ self.crossattention.build(None)
504
+
505
+
506
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L386
507
+ @keras_serializable
508
+ class TFBlipTextEncoder(keras.layers.Layer):
509
+ config_class = BlipTextConfig
510
+
511
+ def __init__(self, config, name=None, **kwargs):
512
+ super().__init__(name=name, **kwargs)
513
+ self.config = config
514
+ self.layer = [TFBlipTextLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
515
+
516
+ @unpack_inputs
517
+ def call(
518
+ self,
519
+ hidden_states,
520
+ attention_mask=None,
521
+ head_mask=None,
522
+ encoder_hidden_states=None,
523
+ encoder_attention_mask=None,
524
+ past_key_values=None,
525
+ use_cache=None,
526
+ output_attentions=False,
527
+ output_hidden_states=False,
528
+ return_dict=True,
529
+ training=None,
530
+ ):
531
+ all_hidden_states = () if output_hidden_states else None
532
+ all_self_attentions = () if output_attentions else None
533
+ all_cross_attentions = () if output_attentions and self.config.is_decoder else None
534
+
535
+ next_decoder_cache = () if use_cache else None
536
+
537
+ for i in range(self.config.num_hidden_layers):
538
+ layer_module = self.layer[i]
539
+ if output_hidden_states:
540
+ all_hidden_states = all_hidden_states + (hidden_states,)
541
+
542
+ layer_head_mask = head_mask[i] if head_mask is not None else None
543
+ past_key_value = past_key_values[i] if past_key_values is not None else None
544
+
545
+ layer_outputs = layer_module(
546
+ hidden_states,
547
+ attention_mask,
548
+ layer_head_mask,
549
+ encoder_hidden_states,
550
+ encoder_attention_mask,
551
+ past_key_value,
552
+ output_attentions,
553
+ training=training,
554
+ )
555
+
556
+ hidden_states = layer_outputs[0]
557
+ if use_cache:
558
+ next_decoder_cache += (layer_outputs[-1],)
559
+ if output_attentions:
560
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
561
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
562
+
563
+ if output_hidden_states:
564
+ all_hidden_states = all_hidden_states + (hidden_states,)
565
+
566
+ if not return_dict:
567
+ return tuple(
568
+ v
569
+ for v in [
570
+ hidden_states,
571
+ next_decoder_cache,
572
+ all_hidden_states,
573
+ all_self_attentions,
574
+ all_cross_attentions,
575
+ ]
576
+ if v is not None
577
+ )
578
+ return TFBaseModelOutputWithPastAndCrossAttentions(
579
+ last_hidden_state=hidden_states,
580
+ past_key_values=next_decoder_cache,
581
+ hidden_states=all_hidden_states,
582
+ attentions=all_self_attentions,
583
+ cross_attentions=all_cross_attentions,
584
+ )
585
+
586
+ def build(self, input_shape=None):
587
+ if self.built:
588
+ return
589
+ self.built = True
590
+ if getattr(self, "layer", None) is not None:
591
+ for layer in self.layer:
592
+ with tf.name_scope(layer.name):
593
+ layer.build(None)
594
+
595
+
596
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->BlipText
597
+ class TFBlipTextPooler(keras.layers.Layer):
598
+ def __init__(self, config: BlipTextConfig, **kwargs):
599
+ super().__init__(**kwargs)
600
+
601
+ self.dense = keras.layers.Dense(
602
+ units=config.hidden_size,
603
+ kernel_initializer=get_initializer(config.initializer_range),
604
+ activation="tanh",
605
+ name="dense",
606
+ )
607
+ self.config = config
608
+
609
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
610
+ # We "pool" the model by simply taking the hidden state corresponding
611
+ # to the first token.
612
+ first_token_tensor = hidden_states[:, 0]
613
+ pooled_output = self.dense(inputs=first_token_tensor)
614
+
615
+ return pooled_output
616
+
617
+ def build(self, input_shape=None):
618
+ if self.built:
619
+ return
620
+ self.built = True
621
+ if getattr(self, "dense", None) is not None:
622
+ with tf.name_scope(self.dense.name):
623
+ self.dense.build([None, None, self.config.hidden_size])
624
+
625
+
626
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->BlipText
627
+ class TFBlipTextPredictionHeadTransform(keras.layers.Layer):
628
+ def __init__(self, config: BlipTextConfig, **kwargs):
629
+ super().__init__(**kwargs)
630
+
631
+ self.dense = keras.layers.Dense(
632
+ units=config.hidden_size,
633
+ kernel_initializer=get_initializer(config.initializer_range),
634
+ name="dense",
635
+ )
636
+
637
+ if isinstance(config.hidden_act, str):
638
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
639
+ else:
640
+ self.transform_act_fn = config.hidden_act
641
+
642
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
643
+ self.config = config
644
+
645
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
646
+ hidden_states = self.dense(inputs=hidden_states)
647
+ hidden_states = self.transform_act_fn(hidden_states)
648
+ hidden_states = self.LayerNorm(inputs=hidden_states)
649
+
650
+ return hidden_states
651
+
652
+ def build(self, input_shape=None):
653
+ if self.built:
654
+ return
655
+ self.built = True
656
+ if getattr(self, "dense", None) is not None:
657
+ with tf.name_scope(self.dense.name):
658
+ self.dense.build([None, None, self.config.hidden_size])
659
+ if getattr(self, "LayerNorm", None) is not None:
660
+ with tf.name_scope(self.LayerNorm.name):
661
+ self.LayerNorm.build([None, None, self.config.hidden_size])
662
+
663
+
664
+ class TFBlipTextLMPredictionHead(keras.layers.Layer):
665
+ def __init__(self, config, **kwargs):
666
+ super().__init__(**kwargs)
667
+ self.transform = TFBlipTextPredictionHeadTransform(config, name="transform")
668
+
669
+ # The output weights are the same as the input embeddings, but there is
670
+ # an output-only bias for each token.
671
+ self.decoder = keras.layers.Dense(
672
+ config.vocab_size,
673
+ kernel_initializer=get_initializer(config.initializer_range),
674
+ name="decoder",
675
+ use_bias=False,
676
+ )
677
+ self.config = config
678
+
679
+ def build(self, input_shape=None):
680
+ self.bias = self.add_weight(name="bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True)
681
+
682
+ if self.built:
683
+ return
684
+ self.built = True
685
+ if getattr(self, "transform", None) is not None:
686
+ with tf.name_scope(self.transform.name):
687
+ self.transform.build(None)
688
+ if getattr(self, "decoder", None) is not None:
689
+ with tf.name_scope(self.decoder.name):
690
+ self.decoder.build([None, None, self.config.hidden_size])
691
+
692
+ def call(self, hidden_states):
693
+ hidden_states = self.transform(hidden_states)
694
+ hidden_states = self.decoder(hidden_states) + self.bias
695
+ return hidden_states
696
+
697
+
698
+ class TFBlipTextOnlyMLMHead(keras.layers.Layer):
699
+ def __init__(self, config, **kwargs):
700
+ super().__init__(**kwargs)
701
+ self.predictions = TFBlipTextLMPredictionHead(config, name="predictions")
702
+
703
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
704
+ prediction_scores = self.predictions(sequence_output)
705
+ return prediction_scores
706
+
707
+ def build(self, input_shape=None):
708
+ if self.built:
709
+ return
710
+ self.built = True
711
+ if getattr(self, "predictions", None) is not None:
712
+ with tf.name_scope(self.predictions.name):
713
+ self.predictions.build(None)
714
+
715
+
716
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L548
717
+ class TFBlipTextPreTrainedModel(TFPreTrainedModel):
718
+ """
719
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
720
+ models.
721
+ """
722
+
723
+ config_class = BlipTextConfig
724
+ base_model_prefix = "bert"
725
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
726
+
727
+
728
+ # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571
729
+ class TFBlipTextModel(TFBlipTextPreTrainedModel):
730
+ """
731
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
732
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
733
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
734
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and `is_decoder` set to `True`; an
735
+ `encoder_hidden_states` is then expected as an input to the forward pass.
736
+ """
737
+
738
+ def __init__(self, config, add_pooling_layer=True, name=None, **kwargs):
739
+ super().__init__(config, name=name, **kwargs)
740
+ self.config = config
741
+
742
+ self.embeddings = TFBlipTextEmbeddings(config, name="embeddings")
743
+ self.encoder = TFBlipTextEncoder(config, name="encoder")
744
+ self.pooler = TFBlipTextPooler(config, name="pooler") if add_pooling_layer else None
745
+
746
+ def get_input_embeddings(self):
747
+ return self.embeddings.word_embeddings
748
+
749
+ def set_input_embeddings(self, value):
750
+ self.embeddings.word_embeddings = value
751
+
752
+ @tf.function
753
+ def get_extended_attention_mask(
754
+ self, attention_mask: tf.Tensor, input_shape: Tuple[int], is_decoder: bool
755
+ ) -> tf.Tensor:
756
+ """
757
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
758
+
759
+ Arguments:
760
+ attention_mask (`tf.Tensor`):
761
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
762
+ input_shape (`Tuple[int]`):
763
+ The shape of the input to the model.
764
+ is_decoder (`bool`):
765
+ Whether the model is used as a decoder.
766
+
767
+ Returns:
768
+ `tf.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
769
+ """
770
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
771
+ # ourselves in which case we just need to make it broadcastable to all heads.
772
+ if not isinstance(attention_mask, tf.Tensor):
773
+ attention_mask = tf.convert_to_tensor(attention_mask) # Catches NumPy inputs that haven't been cast yet
774
+ if attention_mask.shape.rank == 3:
775
+ extended_attention_mask = attention_mask[:, None, :, :]
776
+ elif attention_mask.shape.rank == 2:
777
+ # Provided a padding mask of dimensions [batch_size, seq_length]
778
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
779
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
780
+ if is_decoder:
781
+ batch_size, seq_length = input_shape
782
+
783
+ seq_ids = tf.range(seq_length, dtype=attention_mask.dtype)
784
+ causal_mask = tf.broadcast_to(seq_ids, (batch_size, seq_length, seq_length)) <= seq_ids[None, :, None]
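+ # Illustration: for seq_length=3 the causal mask rows (as 0/1) are [1, 0, 0], [1, 1, 0], [1, 1, 1], i.e. position i attends to positions <= i.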
785
+ # in case past_key_values are used we need to add a prefix ones mask to the causal mask
786
+
787
+ if shape_list(causal_mask)[1] < shape_list(attention_mask)[1]:
788
+ prefix_seq_len = tf.shape(attention_mask)[1] - tf.shape(causal_mask)[1]
789
+ causal_mask = tf.concat(
790
+ [
791
+ tf.ones((batch_size, seq_length, prefix_seq_len), dtype=causal_mask.dtype),
792
+ causal_mask,
793
+ ],
794
+ axis=-1,
795
+ )
796
+ extended_attention_mask = (
797
+ tf.cast(causal_mask[:, None, :, :], attention_mask.dtype) * attention_mask[:, None, None, :]
798
+ )
799
+ else:
800
+ extended_attention_mask = attention_mask[:, None, None, :]
801
+ else:
802
+ raise ValueError(
803
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
804
+ input_shape, attention_mask.shape
805
+ )
806
+ )
807
+
808
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
809
+ # masked positions, this operation will create a tensor which is 0.0 for
810
+ # positions we want to attend and -10000.0 for masked positions.
811
+ # Since we are adding it to the raw scores before the softmax, this is
812
+ # effectively the same as removing these entirely.
813
+ extended_attention_mask = tf.cast(extended_attention_mask, self.dtype) # fp16 compatibility
814
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
815
+ return extended_attention_mask
816
+
817
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
818
+ @unpack_inputs
819
+ def call(
820
+ self,
821
+ input_ids: TFModelInputType | None = None,
822
+ attention_mask: tf.Tensor | None = None,
823
+ position_ids: tf.Tensor | None = None,
824
+ head_mask: tf.Tensor | None = None,
825
+ inputs_embeds: tf.Tensor | None = None,
826
+ encoder_embeds: tf.Tensor | None = None,
827
+ encoder_hidden_states: tf.Tensor | None = None,
828
+ encoder_attention_mask: tf.Tensor | None = None,
829
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
830
+ use_cache: bool | None = None,
831
+ output_attentions: bool | None = None,
832
+ output_hidden_states: bool | None = None,
833
+ return_dict: bool | None = None,
834
+ is_decoder: bool = False,
835
+ training: bool = False,
836
+ ) -> Tuple[tf.Tensor] | TFBaseModelOutputWithPoolingAndCrossAttentions:
837
+ r"""
838
+ encoder_hidden_states (`tf.Tensor`, *optional*):
839
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
840
+ the model is configured as a decoder.
841
+ encoder_attention_mask (`tf.Tensor`, *optional*):
842
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
843
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
844
+ - 1 for tokens that are **not masked**,
845
+ - 0 for tokens that are **masked**.
846
+ past_key_values (`tuple(tuple(tf.Tensor))`, *optional*):
847
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
848
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
849
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
850
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
851
+ use_cache (`bool`, *optional*):
852
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
853
+ `past_key_values`).
854
+ """
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
858
+ )
859
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
860
+
861
+ if is_decoder:
862
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
863
+ else:
864
+ use_cache = False
865
+
866
+ if input_ids is not None and inputs_embeds is not None:
867
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
868
+ elif input_ids is not None:
869
+ input_shape = shape_list(input_ids)
870
+ batch_size, seq_length = input_shape
871
+ elif inputs_embeds is not None:
872
+ input_shape = shape_list(inputs_embeds)[:-1]
873
+ batch_size, seq_length = input_shape
874
+ elif encoder_embeds is not None:
875
+ input_shape = shape_list(encoder_embeds)[:-1]
876
+ batch_size, seq_length = input_shape
877
+ else:
878
+ raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
879
+
880
+ # past_key_values_length
881
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
882
+
883
+ if attention_mask is None:
884
+ attention_mask = tf.ones((batch_size, seq_length + past_key_values_length))
885
+
886
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
887
+ # ourselves in which case we just need to make it broadcastable to all heads.
888
+ extended_attention_mask: tf.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, is_decoder)
889
+
890
+ # If a 2D or 3D attention mask is provided for the cross-attention
891
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
892
+ if encoder_hidden_states is not None:
893
+ if isinstance(encoder_hidden_states, list):
894
+ encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states[0])
895
+ else:
896
+ encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states)
897
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
898
+
899
+ if isinstance(encoder_attention_mask, list):
900
+ encoder_extended_attention_mask = [invert_attention_mask(mask) for mask in encoder_attention_mask]
901
+ elif encoder_attention_mask is None:
902
+ encoder_attention_mask = tf.ones(encoder_hidden_shape)
903
+ encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask)
904
+ else:
905
+ encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask)
906
+ else:
907
+ encoder_extended_attention_mask = None
908
+
909
+ # Prepare head mask if needed
910
+ # 1.0 in head_mask indicate we keep the head
911
+ # attention_probs has shape bsz x n_heads x N x N
912
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
913
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
914
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
915
+
916
+ if encoder_embeds is None:
917
+ embedding_output = self.embeddings(
918
+ input_ids=input_ids,
919
+ position_ids=position_ids,
920
+ inputs_embeds=inputs_embeds,
921
+ past_key_values_length=past_key_values_length,
922
+ )
923
+ else:
924
+ embedding_output = encoder_embeds
925
+
926
+ encoder_outputs = self.encoder(
927
+ embedding_output,
928
+ attention_mask=extended_attention_mask,
929
+ head_mask=head_mask,
930
+ encoder_hidden_states=encoder_hidden_states,
931
+ encoder_attention_mask=encoder_extended_attention_mask,
932
+ past_key_values=past_key_values,
933
+ use_cache=use_cache,
934
+ output_attentions=output_attentions,
935
+ output_hidden_states=output_hidden_states,
936
+ return_dict=return_dict,
937
+ training=training,
938
+ )
939
+ sequence_output = encoder_outputs[0]
940
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
941
+
942
+ if not return_dict:
943
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
944
+
945
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
946
+ last_hidden_state=sequence_output,
947
+ pooler_output=pooled_output,
948
+ past_key_values=encoder_outputs.past_key_values,
949
+ hidden_states=encoder_outputs.hidden_states,
950
+ attentions=encoder_outputs.attentions,
951
+ cross_attentions=encoder_outputs.cross_attentions,
952
+ )
953
+
954
+ def build(self, input_shape=None):
955
+ if self.built:
956
+ return
957
+ self.built = True
958
+ if getattr(self, "embeddings", None) is not None:
959
+ with tf.name_scope(self.embeddings.name):
960
+ self.embeddings.build(None)
961
+ if getattr(self, "encoder", None) is not None:
962
+ with tf.name_scope(self.encoder.name):
963
+ self.encoder.build(None)
964
+ if getattr(self, "pooler", None) is not None:
965
+ with tf.name_scope(self.pooler.name):
966
+ self.pooler.build(None)
967
+
968
+
969
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L811
970
+ class TFBlipTextLMHeadModel(TFBlipTextPreTrainedModel):
971
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
972
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
973
+
974
+ def __init__(self, config, **kwargs):
975
+ super().__init__(config, **kwargs)
976
+
977
+ self.bert = TFBlipTextModel(config, add_pooling_layer=False, name="bert")
978
+ self.cls = TFBlipTextOnlyMLMHead(config, name="cls")
979
+ self.label_smoothing = config.label_smoothing
980
+
981
+ def get_output_embeddings(self):
982
+ return self.cls.predictions.decoder
983
+
984
+ def set_output_embeddings(self, new_embeddings):
985
+ self.cls.predictions.decoder = new_embeddings
986
+
987
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
988
+ @unpack_inputs
989
+ def call(
990
+ self,
991
+ input_ids=None,
992
+ attention_mask=None,
993
+ position_ids=None,
994
+ head_mask=None,
995
+ inputs_embeds=None,
996
+ encoder_hidden_states=None,
997
+ encoder_attention_mask=None,
998
+ labels=None,
999
+ past_key_values=None,
1000
+ use_cache=None,
1001
+ output_attentions=None,
1002
+ output_hidden_states=None,
1003
+ return_dict=None,
1004
+ return_logits=False,
1005
+ is_decoder=True,
1006
+ training=None,
1007
+ ):
1008
+ r"""
1009
+ encoder_hidden_states (`tf.Tensor`, *optional*): Sequence of
1010
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is
1011
+ configured as a decoder.
1012
+ encoder_attention_mask (`tf.Tensor`, *optional*):
1013
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1014
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1015
+ - 1 for tokens that are **not masked**,
1016
+ - 0 for tokens that are **masked**.
1017
+ labels (`tf.Tensor`, *optional*):
1018
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1019
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1020
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1021
+ past_key_values (`tuple(tuple(tf.Tensor))`, *optional*):
1022
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1023
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1024
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1025
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1026
+ use_cache (`bool`, *optional*):
1027
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1028
+ `past_key_values`).
1029
+ """
1030
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1031
+ if labels is not None:
1032
+ use_cache = False
1033
+
1034
+ outputs = self.bert(
1035
+ input_ids,
1036
+ attention_mask=attention_mask,
1037
+ position_ids=position_ids,
1038
+ head_mask=head_mask,
1039
+ inputs_embeds=inputs_embeds,
1040
+ encoder_hidden_states=encoder_hidden_states,
1041
+ encoder_attention_mask=encoder_attention_mask,
1042
+ past_key_values=past_key_values,
1043
+ use_cache=use_cache,
1044
+ output_attentions=output_attentions,
1045
+ output_hidden_states=output_hidden_states,
1046
+ return_dict=return_dict,
1047
+ is_decoder=is_decoder,
1048
+ training=training,
1049
+ )
1050
+
1051
+ sequence_output = outputs[0]
1052
+ prediction_scores = self.cls(sequence_output)
1053
+
1054
+ if return_logits:
1055
+ return prediction_scores[:, :-1, :]
1056
+
1057
+ lm_loss = None
1058
+ if labels is not None:
1059
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1060
+ shifted_prediction_scores = prediction_scores[:, :-1, :]
1061
+ shifted_prediction_scores = tf.reshape(shifted_prediction_scores, (-1, self.config.vocab_size))
1062
+ labels = labels[:, 1:]
1063
+ labels = tf.reshape(labels, (-1,))
1064
+ # Keras won't give us label smoothing for sparse CE, so we de-sparsify things here
1065
+ # Use relu to clamp masked labels at 0 to avoid NaN (we will be zeroing those out later anyway)
1066
+ one_hot_labels = tf.one_hot(tf.nn.relu(labels), depth=self.config.vocab_size, dtype=tf.float32)
1067
+ loss_fct = keras.losses.CategoricalCrossentropy(
1068
+ from_logits=True, label_smoothing=self.label_smoothing, reduction="none"
1069
+ )
1070
+ masked_positions = tf.cast(tf.not_equal(labels, -100), dtype=tf.float32)
1071
+ lm_loss = loss_fct(one_hot_labels, shifted_prediction_scores)
1072
+ lm_loss *= masked_positions
1073
+ lm_loss = tf.reduce_sum(lm_loss, axis=0) / tf.math.count_nonzero(masked_positions, dtype=tf.float32)
1074
+
1075
+ if not return_dict:
1076
+ output = (prediction_scores,) + outputs[2:]
1077
+ return ((lm_loss,) + output) if lm_loss is not None else output
1078
+
1079
+ return TFCausalLMOutputWithCrossAttentions(
1080
+ loss=lm_loss,
1081
+ logits=prediction_scores,
1082
+ past_key_values=outputs.past_key_values,
1083
+ hidden_states=outputs.hidden_states,
1084
+ attentions=outputs.attentions,
1085
+ cross_attentions=outputs.cross_attentions,
1086
+ )
1087
+
1088
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1089
+ input_shape = input_ids.shape
1090
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1091
+ if attention_mask is None:
1092
+ attention_mask = tf.ones(input_shape)  # tf.Tensor has no new_ones(); build the mask with tf.ones
1093
+
1094
+ # cut decoder_input_ids if past_key_values is used
1095
+ if past_key_values is not None:
1096
+ input_ids = input_ids[:, -1:]
1097
+
1098
+ return {
1099
+ "input_ids": input_ids,
1100
+ "attention_mask": attention_mask,
1101
+ "past_key_values": past_key_values,
1102
+ "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
1103
+ "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
1104
+ "is_decoder": True,
1105
+ }
1106
+
1107
+ def _reorder_cache(self, past_key_values, beam_idx):
1108
+ reordered_past = ()
1109
+ for layer_past in past_key_values:
1110
+ reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),)  # tf.gather replaces torch-style index_select
1111
+ return reordered_past
1112
+
1113
+ def build(self, input_shape=None):
1114
+ if self.built:
1115
+ return
1116
+ self.built = True
1117
+ if getattr(self, "bert", None) is not None:
1118
+ with tf.name_scope(self.bert.name):
1119
+ self.bert.build(None)
1120
+ if getattr(self, "cls", None) is not None:
1121
+ with tf.name_scope(self.cls.name):
1122
+ self.cls.build(None)
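The shifted, label-smoothed loss inside `TFBlipTextLMHeadModel.call` is easy to miss in the diff above, so here is a minimal, self-contained sketch of just that piece, run on dummy logits and `-100`-padded labels (shapes, vocabulary size and smoothing value are made up for illustration):

```python
# Toy reproduction of the loss logic above: shift logits/labels for next-token
# prediction, one-hot the labels so Keras can apply label smoothing, and mask
# out the -100 padding positions before averaging.
import tensorflow as tf
from tensorflow import keras

vocab_size, label_smoothing = 8, 0.1
prediction_scores = tf.random.normal((2, 5, vocab_size))           # fake logits
labels = tf.constant([[3, 1, 4, -100, -100], [2, 2, 5, 7, -100]])  # -100 = ignore

shifted_scores = tf.reshape(prediction_scores[:, :-1, :], (-1, vocab_size))
shifted_labels = tf.reshape(labels[:, 1:], (-1,))

# relu clamps the -100 positions to 0; they are zeroed out by the mask below anyway
one_hot_labels = tf.one_hot(tf.nn.relu(shifted_labels), depth=vocab_size, dtype=tf.float32)
loss_fct = keras.losses.CategoricalCrossentropy(
    from_logits=True, label_smoothing=label_smoothing, reduction="none"
)
masked_positions = tf.cast(tf.not_equal(shifted_labels, -100), tf.float32)
lm_loss = loss_fct(one_hot_labels, shifted_scores) * masked_positions
lm_loss = tf.reduce_sum(lm_loss) / tf.math.count_nonzero(masked_positions, dtype=tf.float32)
print(float(lm_loss.numpy()))
```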
venv/lib/python3.10/site-packages/transformers/models/blip/processing_blip.py ADDED
@@ -0,0 +1,150 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Blip.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...image_utils import ImageInput
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
+ class BlipProcessor(ProcessorMixin):
28
+ r"""
29
+ Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.
30
+
31
+ [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the
32
+ docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor (`BlipImageProcessor`):
36
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
37
+ tokenizer (`BertTokenizerFast`):
38
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "BlipImageProcessor"
43
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
44
+
45
+ def __init__(self, image_processor, tokenizer):
46
+ tokenizer.return_token_type_ids = False
47
+ super().__init__(image_processor, tokenizer)
48
+ self.current_processor = self.image_processor
49
+
50
+ def __call__(
51
+ self,
52
+ images: ImageInput = None,
53
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
54
+ add_special_tokens: bool = True,
55
+ padding: Union[bool, str, PaddingStrategy] = False,
56
+ truncation: Union[bool, str, TruncationStrategy] = None,
57
+ max_length: Optional[int] = None,
58
+ stride: int = 0,
59
+ pad_to_multiple_of: Optional[int] = None,
60
+ return_attention_mask: Optional[bool] = None,
61
+ return_overflowing_tokens: bool = False,
62
+ return_special_tokens_mask: bool = False,
63
+ return_offsets_mapping: bool = False,
64
+ return_token_type_ids: bool = False,
65
+ return_length: bool = False,
66
+ verbose: bool = True,
67
+ return_tensors: Optional[Union[str, TensorType]] = None,
68
+ **kwargs,
69
+ ) -> BatchEncoding:
70
+ """
71
+ This method uses [`BlipImageProcessor.__call__`] to prepare image(s) for the model, and
72
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
73
+
74
+ Please refer to the docstring of the above two methods for more information.
75
+ """
76
+ if images is None and text is None:
77
+ raise ValueError("You have to specify either images or text.")
78
+
79
+ # Get only text
80
+ if images is None:
81
+ self.current_processor = self.tokenizer
82
+ text_encoding = self.tokenizer(
83
+ text=text,
84
+ add_special_tokens=add_special_tokens,
85
+ padding=padding,
86
+ truncation=truncation,
87
+ max_length=max_length,
88
+ stride=stride,
89
+ pad_to_multiple_of=pad_to_multiple_of,
90
+ return_attention_mask=return_attention_mask,
91
+ return_overflowing_tokens=return_overflowing_tokens,
92
+ return_special_tokens_mask=return_special_tokens_mask,
93
+ return_offsets_mapping=return_offsets_mapping,
94
+ return_token_type_ids=return_token_type_ids,
95
+ return_length=return_length,
96
+ verbose=verbose,
97
+ return_tensors=return_tensors,
98
+ **kwargs,
99
+ )
100
+ return text_encoding
101
+
102
+ # add pixel_values
103
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
104
+
105
+ if text is not None:
106
+ text_encoding = self.tokenizer(
107
+ text=text,
108
+ add_special_tokens=add_special_tokens,
109
+ padding=padding,
110
+ truncation=truncation,
111
+ max_length=max_length,
112
+ stride=stride,
113
+ pad_to_multiple_of=pad_to_multiple_of,
114
+ return_attention_mask=return_attention_mask,
115
+ return_overflowing_tokens=return_overflowing_tokens,
116
+ return_special_tokens_mask=return_special_tokens_mask,
117
+ return_offsets_mapping=return_offsets_mapping,
118
+ return_token_type_ids=return_token_type_ids,
119
+ return_length=return_length,
120
+ verbose=verbose,
121
+ return_tensors=return_tensors,
122
+ **kwargs,
123
+ )
124
+ else:
125
+ text_encoding = None
126
+
127
+ if text_encoding is not None:
128
+ encoding_image_processor.update(text_encoding)
129
+
130
+ return encoding_image_processor
131
+
132
+ def batch_decode(self, *args, **kwargs):
133
+ """
134
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
135
+ refer to the docstring of this method for more information.
136
+ """
137
+ return self.tokenizer.batch_decode(*args, **kwargs)
138
+
139
+ def decode(self, *args, **kwargs):
140
+ """
141
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
142
+ the docstring of this method for more information.
143
+ """
144
+ return self.tokenizer.decode(*args, **kwargs)
145
+
146
+ @property
147
+ def model_input_names(self):
148
+ tokenizer_input_names = self.tokenizer.model_input_names
149
+ image_processor_input_names = self.image_processor.model_input_names
150
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
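A usage sketch for the processor defined above; the checkpoint name and image URL are illustrative and assume network access:

```python
import requests
from PIL import Image

from transformers import BlipProcessor

# checkpoint name is illustrative; any BLIP checkpoint shipping a processor config works
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# image + text: pixel_values from the image processor merged with the tokenizer fields
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']

# text-only and image-only calls follow the branches in __call__ above
text_only = processor(text="a photo of", return_tensors="pt")
image_only = processor(images=image, return_tensors="pt")
```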
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,325 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Conditional DETR checkpoints."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from collections import OrderedDict
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ ConditionalDetrConfig,
30
+ ConditionalDetrForObjectDetection,
31
+ ConditionalDetrForSegmentation,
32
+ ConditionalDetrImageProcessor,
33
+ )
34
+ from transformers.utils import logging
35
+
36
+
37
+ logging.set_verbosity_info()
38
+ logger = logging.get_logger(__name__)
39
+
40
+ # here we list all keys to be renamed (original name on the left, our name on the right)
41
+ rename_keys = []
42
+ for i in range(6):
43
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
44
+ rename_keys.append(
45
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
46
+ )
47
+ rename_keys.append(
48
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
49
+ )
50
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
51
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
52
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
53
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
54
+ rename_keys.append(
55
+ (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
56
+ )
57
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
58
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
59
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
60
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
61
+ rename_keys.append(
62
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
63
+ )
64
+ rename_keys.append(
65
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
66
+ )
67
+ rename_keys.append(
68
+ (
69
+ f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
70
+ f"decoder.layers.{i}.encoder_attn.out_proj.weight",
71
+ )
72
+ )
73
+ rename_keys.append(
74
+ (
75
+ f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
76
+ f"decoder.layers.{i}.encoder_attn.out_proj.bias",
77
+ )
78
+ )
79
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
80
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
81
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
82
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
83
+ rename_keys.append(
84
+ (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
85
+ )
86
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
87
+ rename_keys.append(
88
+ (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
89
+ )
90
+ rename_keys.append(
91
+ (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
92
+ )
93
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
94
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
95
+
96
+ # q, k, v projections in self/cross-attention in decoder for conditional DETR
97
+ rename_keys.append(
98
+ (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
99
+ )
100
+ rename_keys.append(
101
+ (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
102
+ )
103
+ rename_keys.append(
104
+ (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
105
+ )
106
+ rename_keys.append(
107
+ (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
108
+ )
109
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
110
+ rename_keys.append(
111
+ (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
112
+ )
113
+ # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
114
+ rename_keys.append(
115
+ (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
116
+ )
117
+ rename_keys.append(
118
+ (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
119
+ )
120
+ rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
121
+ rename_keys.append(
122
+ (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
123
+ )
124
+
125
+ rename_keys.append(
126
+ (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
127
+ )
128
+ rename_keys.append(
129
+ (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
130
+ )
131
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
132
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
133
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
134
+ rename_keys.append(
135
+ (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
136
+ )
137
+ # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
138
+ rename_keys.append(
139
+ (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
140
+ )
141
+ rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
142
+ rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
143
+ rename_keys.append(
144
+ (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
145
+ )
146
+
147
+ # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
148
+ # for conditional DETR, also convert reference point head and query scale MLP
149
+ rename_keys.extend(
150
+ [
151
+ ("input_proj.weight", "input_projection.weight"),
152
+ ("input_proj.bias", "input_projection.bias"),
153
+ ("query_embed.weight", "query_position_embeddings.weight"),
154
+ ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
155
+ ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
156
+ ("class_embed.weight", "class_labels_classifier.weight"),
157
+ ("class_embed.bias", "class_labels_classifier.bias"),
158
+ ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
159
+ ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
160
+ ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
161
+ ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
162
+ ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
163
+ ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
164
+ ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
165
+ ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
166
+ ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
167
+ ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
168
+ ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
169
+ ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
170
+ ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
171
+ ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
172
+ ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
173
+ ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
174
+ ]
175
+ )
176
+
177
+
178
+ def rename_key(state_dict, old, new):
179
+ val = state_dict.pop(old)
180
+ state_dict[new] = val
181
+
182
+
183
+ def rename_backbone_keys(state_dict):
184
+ new_state_dict = OrderedDict()
185
+ for key, value in state_dict.items():
186
+ if "backbone.0.body" in key:
187
+ new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
188
+ new_state_dict[new_key] = value
189
+ else:
190
+ new_state_dict[key] = value
191
+
192
+ return new_state_dict
193
+
194
+
195
+ def read_in_q_k_v(state_dict, is_panoptic=False):
196
+ prefix = ""
197
+ if is_panoptic:
198
+ prefix = "conditional_detr."
199
+
200
+ # first: transformer encoder
201
+ for i in range(6):
202
+ # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
203
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
204
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
205
+ # next, add query, keys and values (in that order) to the state dict
206
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
207
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
208
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
209
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
210
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
211
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
212
+
213
+
214
+ # We will verify our results on an image of cute cats
215
+ def prepare_img():
216
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
217
+ im = Image.open(requests.get(url, stream=True).raw)
218
+
219
+ return im
220
+
221
+
222
+ @torch.no_grad()
223
+ def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
224
+ """
225
+ Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure.
226
+ """
227
+
228
+ # load default config
229
+ config = ConditionalDetrConfig()
230
+ # set backbone and dilation attributes
231
+ if "resnet101" in model_name:
232
+ config.backbone = "resnet101"
233
+ if "dc5" in model_name:
234
+ config.dilation = True
235
+ is_panoptic = "panoptic" in model_name
236
+ if is_panoptic:
237
+ config.num_labels = 250
238
+ else:
239
+ config.num_labels = 91
240
+ repo_id = "huggingface/label-files"
241
+ filename = "coco-detection-id2label.json"
242
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
243
+ id2label = {int(k): v for k, v in id2label.items()}
244
+ config.id2label = id2label
245
+ config.label2id = {v: k for k, v in id2label.items()}
246
+
247
+ # load image processor
248
+ format = "coco_panoptic" if is_panoptic else "coco_detection"
249
+ image_processor = ConditionalDetrImageProcessor(format=format)
250
+
251
+ # prepare image
252
+ img = prepare_img()
253
+ encoding = image_processor(images=img, return_tensors="pt")
254
+ pixel_values = encoding["pixel_values"]
255
+
256
+ logger.info(f"Converting model {model_name}...")
257
+
258
+ # load original model from torch hub
259
+ conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
260
+ state_dict = conditional_detr.state_dict()
261
+ # rename keys
262
+ for src, dest in rename_keys:
263
+ if is_panoptic:
264
+ src = "conditional_detr." + src
265
+ rename_key(state_dict, src, dest)
266
+ state_dict = rename_backbone_keys(state_dict)
267
+ # query, key and value matrices need special treatment
268
+ read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
269
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
270
+ prefix = "conditional_detr.model." if is_panoptic else "model."
271
+ for key in state_dict.copy().keys():
272
+ if is_panoptic:
273
+ if (
274
+ key.startswith("conditional_detr")
275
+ and not key.startswith("class_labels_classifier")
276
+ and not key.startswith("bbox_predictor")
277
+ ):
278
+ val = state_dict.pop(key)
279
+ state_dict["conditional_detr.model" + key[4:]] = val
280
+ elif "class_labels_classifier" in key or "bbox_predictor" in key:
281
+ val = state_dict.pop(key)
282
+ state_dict["conditional_detr." + key] = val
283
+ elif key.startswith("bbox_attention") or key.startswith("mask_head"):
284
+ continue
285
+ else:
286
+ val = state_dict.pop(key)
287
+ state_dict[prefix + key] = val
288
+ else:
289
+ if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
290
+ val = state_dict.pop(key)
291
+ state_dict[prefix + key] = val
292
+ # finally, create HuggingFace model and load state dict
293
+ model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
294
+ model.load_state_dict(state_dict)
295
+ model.eval()
296
+ model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
297
+ # verify our conversion
298
+ original_outputs = conditional_detr(pixel_values)
299
+ outputs = model(pixel_values)
300
+ assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
301
+ assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
302
+ if is_panoptic:
303
+ assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
304
+
305
+ # Save model and image processor
306
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
307
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
308
+ model.save_pretrained(pytorch_dump_folder_path)
309
+ image_processor.save_pretrained(pytorch_dump_folder_path)
310
+
311
+
312
+ if __name__ == "__main__":
313
+ parser = argparse.ArgumentParser()
314
+
315
+ parser.add_argument(
316
+ "--model_name",
317
+ default="conditional_detr_resnet50",
318
+ type=str,
319
+ help="Name of the CONDITIONAL_DETR model you'd like to convert.",
320
+ )
321
+ parser.add_argument(
322
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
323
+ )
324
+ args = parser.parse_args()
325
+ convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
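The fused-projection split done by `read_in_q_k_v` is the least obvious step of the conversion, so here is the same slicing applied to a fake single-layer state dict (hidden size 256, as hard-coded in the script):

```python
# Illustrative sketch: PyTorch's nn.MultiheadAttention stores one fused
# (3*d, d) in_proj matrix, which the conversion slices into q/k/v projections.
import torch

d_model = 256
state_dict = {
    "transformer.encoder.layers.0.self_attn.in_proj_weight": torch.randn(3 * d_model, d_model),
    "transformer.encoder.layers.0.self_attn.in_proj_bias": torch.randn(3 * d_model),
}

w = state_dict.pop("transformer.encoder.layers.0.self_attn.in_proj_weight")
b = state_dict.pop("transformer.encoder.layers.0.self_attn.in_proj_bias")
state_dict["encoder.layers.0.self_attn.q_proj.weight"] = w[:d_model, :]
state_dict["encoder.layers.0.self_attn.q_proj.bias"] = b[:d_model]
state_dict["encoder.layers.0.self_attn.k_proj.weight"] = w[d_model : 2 * d_model, :]
state_dict["encoder.layers.0.self_attn.k_proj.bias"] = b[d_model : 2 * d_model]
state_dict["encoder.layers.0.self_attn.v_proj.weight"] = w[-d_model:, :]
state_dict["encoder.layers.0.self_attn.v_proj.bias"] = b[-d_model:]

print({k: tuple(v.shape) for k, v in state_dict.items()})
```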
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/feature_extraction_conditional_detr.py ADDED
@@ -0,0 +1,43 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for Conditional DETR."""
16
+
17
+ import warnings
18
+
19
+ from ...image_transforms import rgb_to_id as _rgb_to_id
20
+ from ...utils import logging
21
+ from .image_processing_conditional_detr import ConditionalDetrImageProcessor
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ def rgb_to_id(x):
28
+ warnings.warn(
29
+ "rgb_to_id has moved and will not be importable from this module from v5. "
30
+ "Please import from transformers.image_transforms instead.",
31
+ FutureWarning,
32
+ )
33
+ return _rgb_to_id(x)
34
+
35
+
36
+ class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor):
37
+ def __init__(self, *args, **kwargs) -> None:
38
+ warnings.warn(
39
+ "The class ConditionalDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
40
+ " Please use ConditionalDetrImageProcessor instead.",
41
+ FutureWarning,
42
+ )
43
+ super().__init__(*args, **kwargs)
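The shim above only swaps the class name and warns; a quick check of that behavior (assuming the vision extras are installed):

```python
import warnings

from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = ConditionalDetrFeatureExtractor()  # emits the FutureWarning defined above

print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
print(isinstance(legacy, ConditionalDetrImageProcessor))           # True: same behavior
```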
venv/lib/python3.10/site-packages/transformers/models/funnel/__init__.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
28
+ "convert_funnel_original_tf_checkpoint_to_pytorch": [],
29
+ "tokenization_funnel": ["FunnelTokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_funnel"] = [
47
+ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "FunnelBaseModel",
49
+ "FunnelForMaskedLM",
50
+ "FunnelForMultipleChoice",
51
+ "FunnelForPreTraining",
52
+ "FunnelForQuestionAnswering",
53
+ "FunnelForSequenceClassification",
54
+ "FunnelForTokenClassification",
55
+ "FunnelModel",
56
+ "FunnelPreTrainedModel",
57
+ "load_tf_weights_in_funnel",
58
+ ]
59
+
60
+ try:
61
+ if not is_tf_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_tf_funnel"] = [
67
+ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
68
+ "TFFunnelBaseModel",
69
+ "TFFunnelForMaskedLM",
70
+ "TFFunnelForMultipleChoice",
71
+ "TFFunnelForPreTraining",
72
+ "TFFunnelForQuestionAnswering",
73
+ "TFFunnelForSequenceClassification",
74
+ "TFFunnelForTokenClassification",
75
+ "TFFunnelModel",
76
+ "TFFunnelPreTrainedModel",
77
+ ]
78
+
79
+
80
+ if TYPE_CHECKING:
81
+ from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
82
+ from .tokenization_funnel import FunnelTokenizer
83
+
84
+ try:
85
+ if not is_tokenizers_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .tokenization_funnel_fast import FunnelTokenizerFast
91
+
92
+ try:
93
+ if not is_torch_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_funnel import (
99
+ FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
100
+ FunnelBaseModel,
101
+ FunnelForMaskedLM,
102
+ FunnelForMultipleChoice,
103
+ FunnelForPreTraining,
104
+ FunnelForQuestionAnswering,
105
+ FunnelForSequenceClassification,
106
+ FunnelForTokenClassification,
107
+ FunnelModel,
108
+ FunnelPreTrainedModel,
109
+ load_tf_weights_in_funnel,
110
+ )
111
+
112
+ try:
113
+ if not is_tf_available():
114
+ raise OptionalDependencyNotAvailable()
115
+ except OptionalDependencyNotAvailable:
116
+ pass
117
+ else:
118
+ from .modeling_tf_funnel import (
119
+ TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
120
+ TFFunnelBaseModel,
121
+ TFFunnelForMaskedLM,
122
+ TFFunnelForMultipleChoice,
123
+ TFFunnelForPreTraining,
124
+ TFFunnelForQuestionAnswering,
125
+ TFFunnelForSequenceClassification,
126
+ TFFunnelForTokenClassification,
127
+ TFFunnelModel,
128
+ TFFunnelPreTrainedModel,
129
+ )
130
+
131
+ else:
132
+ import sys
133
+
134
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
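The `_LazyModule` registration at the bottom means heavy submodules are only imported on first attribute access. A rough check (whether a given submodule is already in `sys.modules` can vary if something else imported it earlier):

```python
import sys

from transformers.models import funnel

# modeling code is usually not loaded yet at this point
print("transformers.models.funnel.modeling_funnel" in sys.modules)

_ = funnel.FunnelTokenizer  # first attribute access triggers the real import
print("transformers.models.funnel.tokenization_funnel" in sys.modules)  # now True
```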
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.05 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/configuration_funnel.cpython-310.pyc ADDED
Binary file (6.88 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/convert_funnel_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.6 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_funnel.cpython-310.pyc ADDED
Binary file (46.3 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_tf_funnel.cpython-310.pyc ADDED
Binary file (54.5 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel.cpython-310.pyc ADDED
Binary file (17.5 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel_fast.cpython-310.pyc ADDED
Binary file (7.39 kB)
venv/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py ADDED
@@ -0,0 +1,166 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, Hugging Face
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Funnel Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class FunnelConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`FunnelModel`] or a [`TFFunnelModel`]. It is used to
30
+ instantiate a Funnel Transformer model according to the specified arguments, defining the model architecture.
31
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel
32
+ Transformer [funnel-transformer/small](https://huggingface.co/funnel-transformer/small) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 30522):
39
+ Vocabulary size of the Funnel transformer. Defines the number of different tokens that can be represented
40
+ by the `inputs_ids` passed when calling [`FunnelModel`] or [`TFFunnelModel`].
41
+ block_sizes (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
42
+ The sizes of the blocks used in the model.
43
+ block_repeats (`List[int]`, *optional*):
44
+ If passed along, each layer of each block is repeated the number of times indicated.
45
+ num_decoder_layers (`int`, *optional*, defaults to 2):
46
+ The number of layers in the decoder (when not using the base model).
47
+ d_model (`int`, *optional*, defaults to 768):
48
+ Dimensionality of the model's hidden states.
49
+ n_head (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ d_head (`int`, *optional*, defaults to 64):
52
+ Dimensionality of the model's heads.
53
+ d_inner (`int`, *optional*, defaults to 3072):
54
+ Inner dimension in the feed-forward blocks.
55
+ hidden_act (`str` or `callable`, *optional*, defaults to `"gelu_new"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for the attention probabilities.
62
+ activation_dropout (`float`, *optional*, defaults to 0.0):
63
+ The dropout probability used between the two layers of the feed-forward blocks.
64
+ initializer_range (`float`, *optional*, defaults to 0.1):
65
+ The upper bound of the *uniform initializer* for initializing all weight matrices in attention layers.
66
+ initializer_std (`float`, *optional*):
67
+ The standard deviation of the *normal initializer* for initializing the embedding matrix and the weight of
68
+ linear layers. Will default to 1 for the embedding matrix and the value given by Xavier initialization for
69
+ linear layers.
70
+ layer_norm_eps (`float`, *optional*, defaults to 1e-09):
71
+ The epsilon used by the layer normalization layers.
72
+ pooling_type (`str`, *optional*, defaults to `"mean"`):
73
+ Possible values are `"mean"` or `"max"`. The way pooling is performed at the beginning of each block.
74
+ attention_type (`str`, *optional*, defaults to `"relative_shift"`):
75
+ Possible values are `"relative_shift"` or `"factorized"`. The former is faster on CPU/GPU while the latter
76
+ is faster on TPU.
77
+ separate_cls (`bool`, *optional*, defaults to `True`):
78
+ Whether or not to separate the cls token when applying pooling.
79
+ truncate_seq (`bool`, *optional*, defaults to `True`):
80
+ When using `separate_cls`, whether or not to truncate the last token when pooling, to avoid getting a
81
+ sequence length that is not a multiple of 2.
82
+ pool_q_only (`bool`, *optional*, defaults to `True`):
83
+ Whether or not to apply the pooling only to the query or to query, key and values for the attention layers.
84
+ """
85
+
86
+ model_type = "funnel"
87
+ attribute_map = {
88
+ "hidden_size": "d_model",
89
+ "num_attention_heads": "n_head",
90
+ }
91
+
92
+ def __init__(
93
+ self,
94
+ vocab_size=30522,
95
+ block_sizes=[4, 4, 4],
96
+ block_repeats=None,
97
+ num_decoder_layers=2,
98
+ d_model=768,
99
+ n_head=12,
100
+ d_head=64,
101
+ d_inner=3072,
102
+ hidden_act="gelu_new",
103
+ hidden_dropout=0.1,
104
+ attention_dropout=0.1,
105
+ activation_dropout=0.0,
106
+ initializer_range=0.1,
107
+ initializer_std=None,
108
+ layer_norm_eps=1e-9,
109
+ pooling_type="mean",
110
+ attention_type="relative_shift",
111
+ separate_cls=True,
112
+ truncate_seq=True,
113
+ pool_q_only=True,
114
+ **kwargs,
115
+ ):
116
+ self.vocab_size = vocab_size
117
+ self.block_sizes = block_sizes
118
+ self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
119
+ assert len(block_sizes) == len(
120
+ self.block_repeats
121
+ ), "`block_sizes` and `block_repeats` should have the same length."
122
+ self.num_decoder_layers = num_decoder_layers
123
+ self.d_model = d_model
124
+ self.n_head = n_head
125
+ self.d_head = d_head
126
+ self.d_inner = d_inner
127
+ self.hidden_act = hidden_act
128
+ self.hidden_dropout = hidden_dropout
129
+ self.attention_dropout = attention_dropout
130
+ self.activation_dropout = activation_dropout
131
+ self.initializer_range = initializer_range
132
+ self.initializer_std = initializer_std
133
+ self.layer_norm_eps = layer_norm_eps
134
+ assert pooling_type in [
135
+ "mean",
136
+ "max",
137
+ ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
138
+ self.pooling_type = pooling_type
139
+ assert attention_type in [
140
+ "relative_shift",
141
+ "factorized",
142
+ ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
143
+ self.attention_type = attention_type
144
+ self.separate_cls = separate_cls
145
+ self.truncate_seq = truncate_seq
146
+ self.pool_q_only = pool_q_only
147
+
148
+ super().__init__(**kwargs)
149
+
150
+ @property
151
+ def num_hidden_layers(self):
152
+ return sum(self.block_sizes)
153
+
154
+ @num_hidden_layers.setter
155
+ def num_hidden_layers(self, value):
156
+ raise NotImplementedError(
157
+ "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
158
+ )
159
+
160
+ @property
161
+ def num_blocks(self):
162
+ return len(self.block_sizes)
163
+
164
+ @num_blocks.setter
165
+ def num_blocks(self, value):
166
+ raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
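A quick look at the derived properties and the attribute map defined above:

```python
from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 1, 1])
print(config.num_blocks)         # 3  == len(block_sizes)
print(config.num_hidden_layers)  # 12 == sum(block_sizes)
print(config.hidden_size, config.num_attention_heads)  # 768 12, via attribute_map

try:
    config.num_hidden_layers = 24  # the setter deliberately refuses
except NotImplementedError as err:
    print(err)
```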
venv/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,65 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Funnel checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
30
+ # Initialise PyTorch model
31
+ config = FunnelConfig.from_json_file(config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = FunnelBaseModel(config) if base_model else FunnelModel(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ parser.add_argument(
60
+ "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
61
+ )
62
+ args = parser.parse_args()
63
+ convert_tf_checkpoint_to_pytorch(
64
+ args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
65
+ )
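The converter can also be called directly from Python; the paths below are placeholders for a real Funnel TF checkpoint and its config file:

```python
from transformers.models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# all three paths are placeholders
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/funnel_tf/model.ckpt",
    config_file="/path/to/funnel_tf/config.json",
    pytorch_dump_path="/path/to/out/pytorch_model.bin",
    base_model=False,
)
```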
venv/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py ADDED
@@ -0,0 +1,1599 @@
1
+ # coding=utf-8
2
+ # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Funnel Transformer model."""
16
+
17
+ import os
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ MaskedLMOutput,
30
+ MultipleChoiceModelOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_code_sample_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_funnel import FunnelConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CONFIG_FOR_DOC = "FunnelConfig"
50
+ _CHECKPOINT_FOR_DOC = "funnel-transformer/small"
51
+
52
+
53
+ from ..deprecated._archive_maps import FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
54
+
55
+
56
+ INF = 1e6
57
+
58
+
59
+ def load_tf_weights_in_funnel(model, config, tf_checkpoint_path):
60
+ """Load tf checkpoints in a pytorch model."""
61
+ try:
62
+ import re
63
+
64
+ import numpy as np
65
+ import tensorflow as tf
66
+ except ImportError:
67
+ logger.error(
68
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
69
+ "https://www.tensorflow.org/install/ for installation instructions."
70
+ )
71
+ raise
72
+ tf_path = os.path.abspath(tf_checkpoint_path)
73
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
74
+ # Load weights from TF model
75
+ init_vars = tf.train.list_variables(tf_path)
76
+ names = []
77
+ arrays = []
78
+ for name, shape in init_vars:
79
+ logger.info(f"Loading TF weight {name} with shape {shape}")
80
+ array = tf.train.load_variable(tf_path, name)
81
+ names.append(name)
82
+ arrays.append(array)
83
+
84
+ _layer_map = {
85
+ "k": "k_head",
86
+ "q": "q_head",
87
+ "v": "v_head",
88
+ "o": "post_proj",
89
+ "layer_1": "linear_1",
90
+ "layer_2": "linear_2",
91
+ "rel_attn": "attention",
92
+ "ff": "ffn",
93
+ "kernel": "weight",
94
+ "gamma": "weight",
95
+ "beta": "bias",
96
+ "lookup_table": "weight",
97
+ "word_embedding": "word_embeddings",
98
+ "input": "embeddings",
99
+ }
100
+
101
+ for name, array in zip(names, arrays):
102
+ name = name.split("/")
103
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
104
+ # which are not required for using pretrained model
105
+ if any(
106
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
107
+ for n in name
108
+ ):
109
+ logger.info(f"Skipping {'/'.join(name)}")
110
+ continue
111
+ if name[0] == "generator":
112
+ continue
113
+ pointer = model
114
+ skipped = False
115
+ for m_name in name[1:]:
116
+ if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r"layer_\d+", m_name):
117
+ layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0])
118
+ if layer_index < config.num_hidden_layers:
119
+ block_idx = 0
120
+ while layer_index >= config.block_sizes[block_idx]:
121
+ layer_index -= config.block_sizes[block_idx]
122
+ block_idx += 1
123
+ pointer = pointer.blocks[block_idx][layer_index]
124
+ else:
125
+ layer_index -= config.num_hidden_layers
126
+ pointer = pointer.layers[layer_index]
127
+ elif m_name == "r" and isinstance(pointer, FunnelRelMultiheadAttention):
128
+ pointer = pointer.r_kernel
129
+ break
130
+ elif m_name in _layer_map:
131
+ pointer = getattr(pointer, _layer_map[m_name])
132
+ else:
133
+ try:
134
+ pointer = getattr(pointer, m_name)
135
+ except AttributeError:
136
+ print(f"Skipping {'/'.join(name)}", array.shape)
137
+ skipped = True
138
+ break
139
+ if not skipped:
140
+ if len(pointer.shape) != len(array.shape):
141
+ array = array.reshape(pointer.shape)
142
+ if m_name == "kernel":
143
+ array = np.transpose(array)
144
+ pointer.data = torch.from_numpy(array)
145
+
146
+ return model
147
+
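+ # A minimal usage sketch for the helper above, assuming a locally available original TF checkpoint
+ # (the path below is hypothetical):
+ #
+ #     from transformers import FunnelConfig, FunnelForPreTraining
+ #
+ #     config = FunnelConfig.from_pretrained("funnel-transformer/small")
+ #     model = FunnelForPreTraining(config)
+ #     load_tf_weights_in_funnel(model, config, "/path/to/tf_checkpoint/model.ckpt")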
148
+
149
+ class FunnelEmbeddings(nn.Module):
150
+ def __init__(self, config: FunnelConfig) -> None:
151
+ super().__init__()
152
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
153
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
154
+ self.dropout = nn.Dropout(config.hidden_dropout)
155
+
156
+ def forward(
157
+ self, input_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None
158
+ ) -> torch.Tensor:
159
+ if inputs_embeds is None:
160
+ inputs_embeds = self.word_embeddings(input_ids)
161
+ embeddings = self.layer_norm(inputs_embeds)
162
+ embeddings = self.dropout(embeddings)
163
+ return embeddings
164
+
165
+
166
+ class FunnelAttentionStructure(nn.Module):
167
+ """
168
+ Contains helpers for `FunnelRelMultiheadAttention`.
169
+ """
170
+
171
+ cls_token_type_id: int = 2
172
+
173
+ def __init__(self, config: FunnelConfig) -> None:
174
+ super().__init__()
175
+ self.config = config
176
+ self.sin_dropout = nn.Dropout(config.hidden_dropout)
177
+ self.cos_dropout = nn.Dropout(config.hidden_dropout)
178
+ # Track how much pooling has been applied so far relative to the original input, i.e. by how much
+ # the sequence length has been divided.
180
+ self.pooling_mult = None
181
+
182
+ def init_attention_inputs(
183
+ self,
184
+ inputs_embeds: torch.Tensor,
185
+ attention_mask: Optional[torch.Tensor] = None,
186
+ token_type_ids: Optional[torch.Tensor] = None,
187
+ ) -> Tuple[torch.Tensor]:
188
+ """Returns the attention inputs associated to the inputs of the model."""
189
+ # inputs_embeds has shape batch_size x seq_len x d_model
190
+ # attention_mask and token_type_ids have shape batch_size x seq_len
191
+ self.pooling_mult = 1
192
+ self.seq_len = seq_len = inputs_embeds.size(1)
193
+ position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device)
194
+ token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
195
+ cls_mask = (
196
+ nn.functional.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0))
197
+ if self.config.separate_cls
198
+ else None
199
+ )
200
+ return (position_embeds, token_type_mat, attention_mask, cls_mask)
201
+
202
+ def token_type_ids_to_mat(self, token_type_ids: torch.Tensor) -> torch.Tensor:
203
+ """Convert `token_type_ids` to `token_type_mat`."""
204
+ token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None]
205
+ # Treat <cls> as in the same segment as both A & B
206
+ cls_ids = token_type_ids == self.cls_token_type_id
207
+ cls_mat = cls_ids[:, :, None] | cls_ids[:, None]
208
+ return cls_mat | token_type_mat
209
+
210
+ def get_position_embeds(
211
+ self, seq_len: int, dtype: torch.dtype, device: torch.device
212
+ ) -> Union[Tuple[torch.Tensor], List[List[torch.Tensor]]]:
213
+ """
214
+ Create and cache inputs related to relative position encoding. Those are very different depending on whether we
215
+ are using the factorized or the relative shift attention:
216
+
217
+ For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,
218
+ final formula.
219
+
220
+ For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final
221
+ formula.
222
+
223
+ Paper link: https://arxiv.org/abs/2006.03236
224
+ """
225
+ d_model = self.config.d_model
226
+ if self.config.attention_type == "factorized":
227
+ # Notations from the paper, appendix A.2.2, final formula.
228
+ # We need to create and return the matrices phi, psi, pi and omega.
229
+ pos_seq = torch.arange(0, seq_len, 1.0, dtype=torch.int64, device=device).to(dtype)
230
+ freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype)
231
+ inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
232
+ sinusoid = pos_seq[:, None] * inv_freq[None]
233
+ sin_embed = torch.sin(sinusoid)
234
+ sin_embed_d = self.sin_dropout(sin_embed)
235
+ cos_embed = torch.cos(sinusoid)
236
+ cos_embed_d = self.cos_dropout(cos_embed)
237
+ # This is different from the formula in the paper...
238
+ phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1)
239
+ psi = torch.cat([cos_embed, sin_embed], dim=-1)
240
+ pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1)
241
+ omega = torch.cat([-sin_embed, cos_embed], dim=-1)
242
+ return (phi, pi, psi, omega)
243
+ else:
244
+ # Notations from the paper, appendix A.2.1, final formula.
245
+ # We need to create and return all the possible vectors R for all blocks and shifts.
246
+ freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype)
247
+ inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
248
+ # Maximum relative positions for the first input
249
+ rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=torch.int64, device=device).to(dtype)
250
+ zero_offset = seq_len * 2
251
+ sinusoid = rel_pos_id[:, None] * inv_freq[None]
252
+ sin_embed = self.sin_dropout(torch.sin(sinusoid))
253
+ cos_embed = self.cos_dropout(torch.cos(sinusoid))
254
+ pos_embed = torch.cat([sin_embed, cos_embed], dim=-1)
255
+
256
+ pos = torch.arange(0, seq_len, dtype=torch.int64, device=device).to(dtype)
257
+ pooled_pos = pos
258
+ position_embeds_list = []
259
+ for block_index in range(0, self.config.num_blocks):
260
+ # For each block with block_index > 0, we need two types of position embeddings:
261
+ # - Attention(pooled-q, unpooled-kv)
262
+ # - Attention(pooled-q, pooled-kv)
263
+ # For block_index = 0 we only need the second one and leave the first one as None.
264
+
265
+ # First type
266
+ if block_index == 0:
267
+ position_embeds_pooling = None
268
+ else:
269
+ pooled_pos = self.stride_pool_pos(pos, block_index)
270
+
271
+ # construct rel_pos_id
272
+ stride = 2 ** (block_index - 1)
273
+ rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
274
+ rel_pos = rel_pos[:, None] + zero_offset
275
+ rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
276
+ position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos)
277
+
278
+ # Second type
279
+ pos = pooled_pos
280
+ stride = 2**block_index
281
+ rel_pos = self.relative_pos(pos, stride)
282
+
283
+ rel_pos = rel_pos[:, None] + zero_offset
284
+ rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
285
+ position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos)
286
+
287
+ position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
288
+ return position_embeds_list
289
+
290
+ def stride_pool_pos(self, pos_id: torch.Tensor, block_index: int):
291
+ """
292
+ Pool `pos_id` while keeping the cls token separate (if `config.separate_cls=True`).
293
+ """
294
+ if self.config.separate_cls:
295
+ # Under separate <cls>, we treat the <cls> as the first token in
296
+ # the previous block of the 1st real block. Since the 1st real
297
+ # block always has position 1, the position of the previous block
298
+ # will be at `1 - 2 ** block_index`.
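+ # For example, cls_pos is -1 for block_index = 1 and -3 for block_index = 2
+ # (i.e. 1 - 2 ** block_index), while the real tokens keep every other position.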
299
+ cls_pos = pos_id.new_tensor([-(2**block_index) + 1])
300
+ pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:]
301
+ return torch.cat([cls_pos, pooled_pos_id[::2]], 0)
302
+ else:
303
+ return pos_id[::2]
304
+
305
+ def relative_pos(self, pos: torch.Tensor, stride: int, pooled_pos=None, shift: int = 1) -> torch.Tensor:
306
+ """
307
+ Build the relative positional vector between `pos` and `pooled_pos`.
308
+ """
309
+ if pooled_pos is None:
310
+ pooled_pos = pos
311
+
312
+ ref_point = pooled_pos[0] - pos[0]
313
+ num_remove = shift * len(pooled_pos)
314
+ max_dist = ref_point + num_remove * stride
315
+ min_dist = pooled_pos[0] - pos[-1]
316
+
317
+ return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device)
318
+
319
+ def stride_pool(
320
+ self,
321
+ tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]],
322
+ axis: Union[int, Tuple[int], List[int]],
323
+ ) -> torch.Tensor:
324
+ """
325
+ Perform pooling by stride slicing the tensor along the given axis.
326
+ """
327
+ if tensor is None:
328
+ return None
329
+
330
+ # Do the stride pool recursively if axis is a list or a tuple of ints.
331
+ if isinstance(axis, (list, tuple)):
332
+ for ax in axis:
333
+ tensor = self.stride_pool(tensor, ax)
334
+ return tensor
335
+
336
+ # Do the stride pool recursively if tensor is a list or tuple of tensors.
337
+ if isinstance(tensor, (tuple, list)):
338
+ return type(tensor)(self.stride_pool(x, axis) for x in tensor)
339
+
340
+ # Deal with negative axis
341
+ axis %= tensor.ndim
342
+
343
+ axis_slice = (
344
+ slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2)
345
+ )
346
+ enc_slice = [slice(None)] * axis + [axis_slice]
347
+ if self.config.separate_cls:
348
+ cls_slice = [slice(None)] * axis + [slice(None, 1)]
349
+ tensor = torch.cat([tensor[cls_slice], tensor], axis=axis)
350
+ return tensor[enc_slice]
351
+
352
+ def pool_tensor(
353
+ self, tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]], mode: str = "mean", stride: int = 2
354
+ ) -> torch.Tensor:
355
+ """Apply 1D pooling to a tensor of size [B x T (x H)]."""
356
+ if tensor is None:
357
+ return None
358
+
359
+ # Do the pool recursively if tensor is a list or tuple of tensors.
360
+ if isinstance(tensor, (tuple, list)):
361
+ return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)
362
+
363
+ if self.config.separate_cls:
364
+ suffix = tensor[:, :-1] if self.config.truncate_seq else tensor
365
+ tensor = torch.cat([tensor[:, :1], suffix], dim=1)
366
+
367
+ ndim = tensor.ndim
368
+ if ndim == 2:
369
+ tensor = tensor[:, None, :, None]
370
+ elif ndim == 3:
371
+ tensor = tensor[:, None, :, :]
372
+ # Stride is applied on the second-to-last dimension.
373
+ stride = (stride, 1)
374
+
375
+ if mode == "mean":
376
+ tensor = nn.functional.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True)
377
+ elif mode == "max":
378
+ tensor = nn.functional.max_pool2d(tensor, stride, stride=stride, ceil_mode=True)
379
+ elif mode == "min":
380
+ tensor = -nn.functional.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True)
381
+ else:
382
+ raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
383
+
384
+ if ndim == 2:
385
+ return tensor[:, 0, :, 0]
386
+ elif ndim == 3:
387
+ return tensor[:, 0]
388
+ return tensor
389
+
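+ # For illustration: a (batch_size, 8) attention mask pooled with stride 2 and mode="min" becomes
+ # (batch_size, 4), and a (batch_size, 8, d_model) hidden state pooled with mode="mean" becomes
+ # (batch_size, 4, d_model); with separate_cls=True the <cls> position is kept at index 0.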
390
+ def pre_attention_pooling(
391
+ self, output, attention_inputs: Tuple[torch.Tensor]
392
+ ) -> Tuple[torch.Tensor, Tuple[torch.Tensor]]:
393
+ """Pool `output` and the proper parts of `attention_inputs` before the attention layer."""
394
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
395
+ if self.config.pool_q_only:
396
+ if self.config.attention_type == "factorized":
397
+ position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
398
+ token_type_mat = self.stride_pool(token_type_mat, 1)
399
+ cls_mask = self.stride_pool(cls_mask, 0)
400
+ output = self.pool_tensor(output, mode=self.config.pooling_type)
401
+ else:
402
+ self.pooling_mult *= 2
403
+ if self.config.attention_type == "factorized":
404
+ position_embeds = self.stride_pool(position_embeds, 0)
405
+ token_type_mat = self.stride_pool(token_type_mat, [1, 2])
406
+ cls_mask = self.stride_pool(cls_mask, [1, 2])
407
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
408
+ output = self.pool_tensor(output, mode=self.config.pooling_type)
409
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
410
+ return output, attention_inputs
411
+
412
+ def post_attention_pooling(self, attention_inputs: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]:
413
+ """Pool the proper parts of `attention_inputs` after the attention layer."""
414
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
415
+ if self.config.pool_q_only:
416
+ self.pooling_mult *= 2
417
+ if self.config.attention_type == "factorized":
418
+ position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
419
+ token_type_mat = self.stride_pool(token_type_mat, 2)
420
+ cls_mask = self.stride_pool(cls_mask, 1)
421
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
422
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
423
+ return attention_inputs
424
+
425
+
426
+ def _relative_shift_gather(positional_attn: torch.Tensor, context_len: int, shift: int) -> torch.Tensor:
427
+ batch_size, n_head, seq_len, max_rel_len = positional_attn.shape
428
+ # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i - j
429
+
430
+ # What's next is the same as doing the following gather, which might be clearer code but less efficient.
431
+ # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
432
+ # # matrix of context_len + i-j
433
+ # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
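+ # Worked example (for illustration, shift = 1): with seq_len = context_len = 4, max_rel_len = 8;
+ # reshaping to (..., 8, 4), dropping the first `shift` rows and reshaping back to (..., 4, 7)
+ # aligns each query i with relative positions i - j, so keeping the first context_len columns
+ # yields the (..., 4, 4) positional attention scores.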
434
+
435
+ positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
436
+ positional_attn = positional_attn[:, :, shift:, :]
437
+ positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
438
+ positional_attn = positional_attn[..., :context_len]
439
+ return positional_attn
440
+
441
+
442
+ class FunnelRelMultiheadAttention(nn.Module):
443
+ def __init__(self, config: FunnelConfig, block_index: int) -> None:
444
+ super().__init__()
445
+ self.config = config
446
+ self.block_index = block_index
447
+ d_model, n_head, d_head = config.d_model, config.n_head, config.d_head
448
+
449
+ self.hidden_dropout = nn.Dropout(config.hidden_dropout)
450
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
451
+
452
+ self.q_head = nn.Linear(d_model, n_head * d_head, bias=False)
453
+ self.k_head = nn.Linear(d_model, n_head * d_head)
454
+ self.v_head = nn.Linear(d_model, n_head * d_head)
455
+
456
+ self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head]))
457
+ self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head]))
458
+ self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head]))
459
+ self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head]))
460
+ self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head]))
461
+
462
+ self.post_proj = nn.Linear(n_head * d_head, d_model)
463
+ self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps)
464
+ self.scale = 1.0 / (d_head**0.5)
465
+
466
+ def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
467
+ """Relative attention score for the positional encodings"""
468
+ # q_head has shape batch_size x seq_len x n_head x d_head
469
+ if self.config.attention_type == "factorized":
470
+ # Notations from the paper, appendix A.2.2, final formula (https://arxiv.org/abs/2006.03236)
471
+ # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model
472
+ phi, pi, psi, omega = position_embeds
473
+ # Shape n_head x d_head
474
+ u = self.r_r_bias * self.scale
475
+ # Shape d_model x n_head x d_head
476
+ w_r = self.r_kernel
477
+
478
+ # Shape batch_size x seq_len x n_head x d_model
479
+ q_r_attention = torch.einsum("binh,dnh->bind", q_head + u, w_r)
480
+ q_r_attention_1 = q_r_attention * phi[:, None]
481
+ q_r_attention_2 = q_r_attention * pi[:, None]
482
+
483
+ # Shape batch_size x n_head x seq_len x context_len
484
+ positional_attn = torch.einsum("bind,jd->bnij", q_r_attention_1, psi) + torch.einsum(
485
+ "bind,jd->bnij", q_r_attention_2, omega
486
+ )
487
+ else:
488
+ shift = 2 if q_head.shape[1] != context_len else 1
489
+ # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
490
+ # Grab the proper positional encoding, shape max_rel_len x d_model
491
+ r = position_embeds[self.block_index][shift - 1]
492
+ # Shape n_head x d_head
493
+ v = self.r_r_bias * self.scale
494
+ # Shape d_model x n_head x d_head
495
+ w_r = self.r_kernel
496
+
497
+ # Shape max_rel_len x n_head x d_model
498
+ r_head = torch.einsum("td,dnh->tnh", r, w_r)
499
+ # Shape batch_size x n_head x seq_len x max_rel_len
500
+ positional_attn = torch.einsum("binh,tnh->bnit", q_head + v, r_head)
501
+ # Shape batch_size x n_head x seq_len x context_len
502
+ positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
503
+
504
+ if cls_mask is not None:
505
+ positional_attn *= cls_mask
506
+ return positional_attn
507
+
508
+ def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
509
+ """Relative attention score for the token_type_ids"""
510
+ if token_type_mat is None:
511
+ return 0
512
+ batch_size, seq_len, context_len = token_type_mat.shape
513
+ # q_head has shape batch_size x seq_len x n_head x d_head
514
+ # Shape n_head x d_head
515
+ r_s_bias = self.r_s_bias * self.scale
516
+
517
+ # Shape batch_size x n_head x seq_len x 2
518
+ token_type_bias = torch.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
519
+ # Shape batch_size x n_head x seq_len x context_len
520
+ token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len])
521
+ # Shapes batch_size x n_head x seq_len
522
+ diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1)
523
+ # Shape batch_size x n_head x seq_len x context_len
524
+ token_type_attn = torch.where(
525
+ token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape)
526
+ )
527
+
528
+ if cls_mask is not None:
529
+ token_type_attn *= cls_mask
530
+ return token_type_attn
531
+
532
+ def forward(
533
+ self,
534
+ query: torch.Tensor,
535
+ key: torch.Tensor,
536
+ value: torch.Tensor,
537
+ attention_inputs: Tuple[torch.Tensor],
538
+ output_attentions: bool = False,
539
+ ) -> Tuple[torch.Tensor, ...]:
540
+ # query has shape batch_size x seq_len x d_model
541
+ # key and value have shapes batch_size x context_len x d_model
542
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
543
+
544
+ batch_size, seq_len, _ = query.shape
545
+ context_len = key.shape[1]
546
+ n_head, d_head = self.config.n_head, self.config.d_head
547
+
548
+ # Shape batch_size x seq_len x n_head x d_head
549
+ q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head)
550
+ # Shapes batch_size x context_len x n_head x d_head
551
+ k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head)
552
+ v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head)
553
+
554
+ q_head = q_head * self.scale
555
+ # Shape n_head x d_head
556
+ r_w_bias = self.r_w_bias * self.scale
557
+ # Shapes batch_size x n_head x seq_len x context_len
558
+ content_score = torch.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
559
+ positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)
560
+ token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
561
+
562
+ # merge attention scores
563
+ attn_score = content_score + positional_attn + token_type_attn
564
+
565
+ # Cast to float32 so the masking below stays numerically safe under mixed-precision training
566
+ dtype = attn_score.dtype
567
+ attn_score = attn_score.float()
568
+ # perform masking
569
+ if attention_mask is not None:
570
+ attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float())
571
+ # attention probability
572
+ attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype)
573
+ attn_prob = self.attention_dropout(attn_prob)
574
+
575
+ # attention output, shape batch_size x seq_len x n_head x d_head
576
+ attn_vec = torch.einsum("bnij,bjnd->bind", attn_prob, v_head)
577
+
578
+ # Shape batch_size x seq_len x d_model
579
+ attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head))
580
+ attn_out = self.hidden_dropout(attn_out)
581
+
582
+ output = self.layer_norm(query + attn_out)
583
+ return (output, attn_prob) if output_attentions else (output,)
584
+
585
+
586
+ class FunnelPositionwiseFFN(nn.Module):
587
+ def __init__(self, config: FunnelConfig) -> None:
588
+ super().__init__()
589
+ self.linear_1 = nn.Linear(config.d_model, config.d_inner)
590
+ self.activation_function = ACT2FN[config.hidden_act]
591
+ self.activation_dropout = nn.Dropout(config.activation_dropout)
592
+ self.linear_2 = nn.Linear(config.d_inner, config.d_model)
593
+ self.dropout = nn.Dropout(config.hidden_dropout)
594
+ self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
595
+
596
+ def forward(self, hidden: torch.Tensor) -> torch.Tensor:
597
+ h = self.linear_1(hidden)
598
+ h = self.activation_function(h)
599
+ h = self.activation_dropout(h)
600
+ h = self.linear_2(h)
601
+ h = self.dropout(h)
602
+ return self.layer_norm(hidden + h)
603
+
604
+
605
+ class FunnelLayer(nn.Module):
606
+ def __init__(self, config: FunnelConfig, block_index: int) -> None:
607
+ super().__init__()
608
+ self.attention = FunnelRelMultiheadAttention(config, block_index)
609
+ self.ffn = FunnelPositionwiseFFN(config)
610
+
611
+ def forward(
612
+ self,
613
+ query: torch.Tensor,
614
+ key: torch.Tensor,
615
+ value: torch.Tensor,
616
+ attention_inputs,
617
+ output_attentions: bool = False,
618
+ ) -> Tuple:
619
+ attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions)
620
+ output = self.ffn(attn[0])
621
+ return (output, attn[1]) if output_attentions else (output,)
622
+
623
+
624
+ class FunnelEncoder(nn.Module):
625
+ def __init__(self, config: FunnelConfig) -> None:
626
+ super().__init__()
627
+ self.config = config
628
+ self.attention_structure = FunnelAttentionStructure(config)
629
+ self.blocks = nn.ModuleList(
630
+ [
631
+ nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)])
632
+ for block_index, block_size in enumerate(config.block_sizes)
633
+ ]
634
+ )
635
+
636
+ def forward(
637
+ self,
638
+ inputs_embeds: torch.Tensor,
639
+ attention_mask: Optional[torch.Tensor] = None,
640
+ token_type_ids: Optional[torch.Tensor] = None,
641
+ output_attentions: bool = False,
642
+ output_hidden_states: bool = False,
643
+ return_dict: bool = True,
644
+ ) -> Union[Tuple, BaseModelOutput]:
645
+ # The pooling is not implemented on long tensors, so we convert the mask to the embeddings' dtype.
646
+ attention_mask = attention_mask.type_as(inputs_embeds)
647
+ attention_inputs = self.attention_structure.init_attention_inputs(
648
+ inputs_embeds,
649
+ attention_mask=attention_mask,
650
+ token_type_ids=token_type_ids,
651
+ )
652
+ hidden = inputs_embeds
653
+
654
+ all_hidden_states = (inputs_embeds,) if output_hidden_states else None
655
+ all_attentions = () if output_attentions else None
656
+
657
+ for block_index, block in enumerate(self.blocks):
658
+ pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1)
659
+ pooling_flag = pooling_flag and block_index > 0
660
+ if pooling_flag:
661
+ pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(
662
+ hidden, attention_inputs
663
+ )
664
+ for layer_index, layer in enumerate(block):
665
+ for repeat_index in range(self.config.block_repeats[block_index]):
666
+ do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
667
+ if do_pooling:
668
+ query = pooled_hidden
669
+ key = value = hidden if self.config.pool_q_only else pooled_hidden
670
+ else:
671
+ query = key = value = hidden
672
+ layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions)
673
+ hidden = layer_output[0]
674
+ if do_pooling:
675
+ attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)
676
+
677
+ if output_attentions:
678
+ all_attentions = all_attentions + layer_output[1:]
679
+ if output_hidden_states:
680
+ all_hidden_states = all_hidden_states + (hidden,)
681
+
682
+ if not return_dict:
683
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
684
+ return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
685
+
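+ # For illustration: with block_sizes=[4, 4, 4] and pool_q_only=True, the sequence length is halved at
+ # the start of blocks 1 and 2 (only the queries are pooled for the first layer of a block; keys and
+ # values are pooled right after it), so the encoder's final hidden state is roughly a quarter of the
+ # input length before the decoder upsamples it back.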
686
+
687
+ def upsample(
688
+ x: torch.Tensor, stride: int, target_len: int, separate_cls: bool = True, truncate_seq: bool = False
689
+ ) -> torch.Tensor:
690
+ """
691
+ Upsample tensor `x` to match `target_len` by repeating each token `stride` times along the sequence length dimension.
692
+ """
693
+ if stride == 1:
694
+ return x
695
+ if separate_cls:
696
+ cls = x[:, :1]
697
+ x = x[:, 1:]
698
+ output = torch.repeat_interleave(x, repeats=stride, dim=1)
699
+ if separate_cls:
700
+ if truncate_seq:
701
+ output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0))
702
+ output = output[:, : target_len - 1]
703
+ output = torch.cat([cls, output], dim=1)
704
+ else:
705
+ output = output[:, :target_len]
706
+ return output
707
+
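+ # For illustration: with three blocks the decoder receives hidden states pooled by a factor of
+ # 2 ** (3 - 1) = 4, so upsample() repeats each token 4 times (keeping <cls> intact when
+ # separate_cls=True) to recover the original sequence length before adding first_block_hidden.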
708
+
709
+ class FunnelDecoder(nn.Module):
710
+ def __init__(self, config: FunnelConfig) -> None:
711
+ super().__init__()
712
+ self.config = config
713
+ self.attention_structure = FunnelAttentionStructure(config)
714
+ self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)])
715
+
716
+ def forward(
717
+ self,
718
+ final_hidden: torch.Tensor,
719
+ first_block_hidden: torch.Tensor,
720
+ attention_mask: Optional[torch.Tensor] = None,
721
+ token_type_ids: Optional[torch.Tensor] = None,
722
+ output_attentions: bool = False,
723
+ output_hidden_states: bool = False,
724
+ return_dict: bool = True,
725
+ ) -> Union[Tuple, BaseModelOutput]:
726
+ upsampled_hidden = upsample(
727
+ final_hidden,
728
+ stride=2 ** (len(self.config.block_sizes) - 1),
729
+ target_len=first_block_hidden.shape[1],
730
+ separate_cls=self.config.separate_cls,
731
+ truncate_seq=self.config.truncate_seq,
732
+ )
733
+
734
+ hidden = upsampled_hidden + first_block_hidden
735
+ all_hidden_states = (hidden,) if output_hidden_states else None
736
+ all_attentions = () if output_attentions else None
737
+
738
+ attention_inputs = self.attention_structure.init_attention_inputs(
739
+ hidden,
740
+ attention_mask=attention_mask,
741
+ token_type_ids=token_type_ids,
742
+ )
743
+
744
+ for layer in self.layers:
745
+ layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions)
746
+ hidden = layer_output[0]
747
+
748
+ if output_attentions:
749
+ all_attentions = all_attentions + layer_output[1:]
750
+ if output_hidden_states:
751
+ all_hidden_states = all_hidden_states + (hidden,)
752
+
753
+ if not return_dict:
754
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
755
+ return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
756
+
757
+
758
+ class FunnelDiscriminatorPredictions(nn.Module):
759
+ """Prediction module for the discriminator, made up of two dense layers."""
760
+
761
+ def __init__(self, config: FunnelConfig) -> None:
762
+ super().__init__()
763
+ self.config = config
764
+ self.dense = nn.Linear(config.d_model, config.d_model)
765
+ self.dense_prediction = nn.Linear(config.d_model, 1)
766
+
767
+ def forward(self, discriminator_hidden_states: torch.Tensor) -> torch.Tensor:
768
+ hidden_states = self.dense(discriminator_hidden_states)
769
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
770
+ logits = self.dense_prediction(hidden_states).squeeze(-1)
771
+ return logits
772
+
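+ # For illustration: given discriminator hidden states of shape (batch_size, seq_len, d_model), these
+ # predictions are per-token replaced/original logits of shape (batch_size, seq_len), matching the
+ # ELECTRA-style pretraining objective used by FunnelForPreTraining below.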
773
+
774
+ class FunnelPreTrainedModel(PreTrainedModel):
775
+ """
776
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
777
+ models.
778
+ """
779
+
780
+ config_class = FunnelConfig
781
+ load_tf_weights = load_tf_weights_in_funnel
782
+ base_model_prefix = "funnel"
783
+
784
+ def _init_weights(self, module):
785
+ classname = module.__class__.__name__
786
+ if classname.find("Linear") != -1:
787
+ if getattr(module, "weight", None) is not None:
788
+ if self.config.initializer_std is None:
789
+ fan_out, fan_in = module.weight.shape
790
+ std = np.sqrt(1.0 / float(fan_in + fan_out))
791
+ else:
792
+ std = self.config.initializer_std
793
+ nn.init.normal_(module.weight, std=std)
794
+ if getattr(module, "bias", None) is not None:
795
+ nn.init.constant_(module.bias, 0.0)
796
+ elif classname == "FunnelRelMultiheadAttention":
797
+ nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range)
798
+ nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range)
799
+ nn.init.uniform_(module.r_kernel, b=self.config.initializer_range)
800
+ nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range)
801
+ nn.init.uniform_(module.seg_embed, b=self.config.initializer_range)
802
+ elif classname == "FunnelEmbeddings":
803
+ std = 1.0 if self.config.initializer_std is None else self.config.initializer_std
804
+ nn.init.normal_(module.word_embeddings.weight, std=std)
805
+ if module.word_embeddings.padding_idx is not None:
806
+ module.word_embeddings.weight.data[module.word_embeddings.padding_idx].zero_()
807
+
808
+
809
+ class FunnelClassificationHead(nn.Module):
810
+ def __init__(self, config: FunnelConfig, n_labels: int) -> None:
811
+ super().__init__()
812
+ self.linear_hidden = nn.Linear(config.d_model, config.d_model)
813
+ self.dropout = nn.Dropout(config.hidden_dropout)
814
+ self.linear_out = nn.Linear(config.d_model, n_labels)
815
+
816
+ def forward(self, hidden: torch.Tensor) -> torch.Tensor:
817
+ hidden = self.linear_hidden(hidden)
818
+ hidden = torch.tanh(hidden)
819
+ hidden = self.dropout(hidden)
820
+ return self.linear_out(hidden)
821
+
822
+
823
+ @dataclass
824
+ class FunnelForPreTrainingOutput(ModelOutput):
825
+ """
826
+ Output type of [`FunnelForPreTraining`].
827
+
828
+ Args:
829
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
830
+ Total loss of the ELECTRA-style objective.
831
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
832
+ Prediction scores of the head (scores for each token before SoftMax).
833
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
834
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
835
+ shape `(batch_size, sequence_length, hidden_size)`.
836
+
837
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
838
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
839
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
840
+ sequence_length)`.
841
+
842
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
843
+ heads.
844
+ """
845
+
846
+ loss: Optional[torch.FloatTensor] = None
847
+ logits: torch.FloatTensor = None
848
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
849
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
850
+
851
+
852
+ FUNNEL_START_DOCSTRING = r"""
853
+
854
+ The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
855
+ Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
856
+
857
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
858
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
859
+ etc.)
860
+
861
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
862
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
863
+ and behavior.
864
+
865
+ Parameters:
866
+ config ([`FunnelConfig`]): Model configuration class with all the parameters of the model.
867
+ Initializing with a config file does not load the weights associated with the model, only the
868
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
869
+ """
870
+
871
+ FUNNEL_INPUTS_DOCSTRING = r"""
872
+ Args:
873
+ input_ids (`torch.LongTensor` of shape `({0})`):
874
+ Indices of input sequence tokens in the vocabulary.
875
+
876
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
877
+ [`PreTrainedTokenizer.__call__`] for details.
878
+
879
+ [What are input IDs?](../glossary#input-ids)
880
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
881
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
882
+
883
+ - 1 for tokens that are **not masked**,
884
+ - 0 for tokens that are **masked**.
885
+
886
+ [What are attention masks?](../glossary#attention-mask)
887
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
888
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
889
+ 1]`:
890
+
891
+ - 0 corresponds to a *sentence A* token,
892
+ - 1 corresponds to a *sentence B* token.
893
+
894
+ [What are token type IDs?](../glossary#token-type-ids)
895
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
896
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
897
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
898
+ model's internal embedding lookup matrix.
899
+ output_attentions (`bool`, *optional*):
900
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
901
+ tensors for more detail.
902
+ output_hidden_states (`bool`, *optional*):
903
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
904
+ more detail.
905
+ return_dict (`bool`, *optional*):
906
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
907
+ """
908
+
909
+
910
+ @add_start_docstrings(
911
+ """
912
+ The base Funnel Transformer Model transformer outputting raw hidden-states without the upsampling head (also called
913
+ decoder) or any task-specific head on top.
914
+ """,
915
+ FUNNEL_START_DOCSTRING,
916
+ )
917
+ class FunnelBaseModel(FunnelPreTrainedModel):
918
+ def __init__(self, config: FunnelConfig) -> None:
919
+ super().__init__(config)
920
+
921
+ self.embeddings = FunnelEmbeddings(config)
922
+ self.encoder = FunnelEncoder(config)
923
+
924
+ # Initialize weights and apply final processing
925
+ self.post_init()
926
+
927
+ def get_input_embeddings(self) -> nn.Embedding:
928
+ return self.embeddings.word_embeddings
929
+
930
+ def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
931
+ self.embeddings.word_embeddings = new_embeddings
932
+
933
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
934
+ @add_code_sample_docstrings(
935
+ checkpoint="funnel-transformer/small-base",
936
+ output_type=BaseModelOutput,
937
+ config_class=_CONFIG_FOR_DOC,
938
+ )
939
+ def forward(
940
+ self,
941
+ input_ids: Optional[torch.Tensor] = None,
942
+ attention_mask: Optional[torch.Tensor] = None,
943
+ token_type_ids: Optional[torch.Tensor] = None,
944
+ position_ids: Optional[torch.Tensor] = None,
945
+ head_mask: Optional[torch.Tensor] = None,
946
+ inputs_embeds: Optional[torch.Tensor] = None,
947
+ output_attentions: Optional[bool] = None,
948
+ output_hidden_states: Optional[bool] = None,
949
+ return_dict: Optional[bool] = None,
950
+ ) -> Union[Tuple, BaseModelOutput]:
951
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
952
+ output_hidden_states = (
953
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
954
+ )
955
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
956
+
957
+ if input_ids is not None and inputs_embeds is not None:
958
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
959
+ elif input_ids is not None:
960
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
961
+ input_shape = input_ids.size()
962
+ elif inputs_embeds is not None:
963
+ input_shape = inputs_embeds.size()[:-1]
964
+ else:
965
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
966
+
967
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
968
+
969
+ if attention_mask is None:
970
+ attention_mask = torch.ones(input_shape, device=device)
971
+ if token_type_ids is None:
972
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
973
+
974
+ # TODO: deal with head_mask
975
+ if inputs_embeds is None:
976
+ inputs_embeds = self.embeddings(input_ids)
977
+
978
+ encoder_outputs = self.encoder(
979
+ inputs_embeds,
980
+ attention_mask=attention_mask,
981
+ token_type_ids=token_type_ids,
982
+ output_attentions=output_attentions,
983
+ output_hidden_states=output_hidden_states,
984
+ return_dict=return_dict,
985
+ )
986
+
987
+ return encoder_outputs
988
+
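+ # A minimal usage sketch, assuming the "funnel-transformer/small-base" checkpoint; the base model
+ # returns hidden states at the pooled (shortened) sequence length, unlike FunnelModel below, which
+ # upsamples back to the full input length:
+ #
+ #     from transformers import AutoTokenizer, FunnelBaseModel
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small-base")
+ #     model = FunnelBaseModel.from_pretrained("funnel-transformer/small-base")
+ #     outputs = model(**tokenizer("Hello, my dog is cute", return_tensors="pt"))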
989
+
990
+ @add_start_docstrings(
991
+ "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
992
+ FUNNEL_START_DOCSTRING,
993
+ )
994
+ class FunnelModel(FunnelPreTrainedModel):
995
+ def __init__(self, config: FunnelConfig) -> None:
996
+ super().__init__(config)
997
+ self.config = config
998
+ self.embeddings = FunnelEmbeddings(config)
999
+ self.encoder = FunnelEncoder(config)
1000
+ self.decoder = FunnelDecoder(config)
1001
+
1002
+ # Initialize weights and apply final processing
1003
+ self.post_init()
1004
+
1005
+ def get_input_embeddings(self) -> nn.Embedding:
1006
+ return self.embeddings.word_embeddings
1007
+
1008
+ def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
1009
+ self.embeddings.word_embeddings = new_embeddings
1010
+
1011
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1012
+ @add_code_sample_docstrings(
1013
+ checkpoint=_CHECKPOINT_FOR_DOC,
1014
+ output_type=BaseModelOutput,
1015
+ config_class=_CONFIG_FOR_DOC,
1016
+ )
1017
+ def forward(
1018
+ self,
1019
+ input_ids: Optional[torch.Tensor] = None,
1020
+ attention_mask: Optional[torch.Tensor] = None,
1021
+ token_type_ids: Optional[torch.Tensor] = None,
1022
+ inputs_embeds: Optional[torch.Tensor] = None,
1023
+ output_attentions: Optional[bool] = None,
1024
+ output_hidden_states: Optional[bool] = None,
1025
+ return_dict: Optional[bool] = None,
1026
+ ) -> Union[Tuple, BaseModelOutput]:
1027
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1028
+ output_hidden_states = (
1029
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1030
+ )
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+
1033
+ if input_ids is not None and inputs_embeds is not None:
1034
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1035
+ elif input_ids is not None:
1036
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1037
+ input_shape = input_ids.size()
1038
+ elif inputs_embeds is not None:
1039
+ input_shape = inputs_embeds.size()[:-1]
1040
+ else:
1041
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1042
+
1043
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1044
+
1045
+ if attention_mask is None:
1046
+ attention_mask = torch.ones(input_shape, device=device)
1047
+ if token_type_ids is None:
1048
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1049
+
1050
+ # TODO: deal with head_mask
1051
+ if inputs_embeds is None:
1052
+ inputs_embeds = self.embeddings(input_ids)
1053
+
1054
+ encoder_outputs = self.encoder(
1055
+ inputs_embeds,
1056
+ attention_mask=attention_mask,
1057
+ token_type_ids=token_type_ids,
1058
+ output_attentions=output_attentions,
1059
+ output_hidden_states=True,
1060
+ return_dict=return_dict,
1061
+ )
1062
+
1063
+ decoder_outputs = self.decoder(
1064
+ final_hidden=encoder_outputs[0],
1065
+ first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]],
1066
+ attention_mask=attention_mask,
1067
+ token_type_ids=token_type_ids,
1068
+ output_attentions=output_attentions,
1069
+ output_hidden_states=output_hidden_states,
1070
+ return_dict=return_dict,
1071
+ )
1072
+
1073
+ if not return_dict:
1074
+ idx = 0
1075
+ outputs = (decoder_outputs[0],)
1076
+ if output_hidden_states:
1077
+ idx += 1
1078
+ outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
1079
+ if output_attentions:
1080
+ idx += 1
1081
+ outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
1082
+ return outputs
1083
+
1084
+ return BaseModelOutput(
1085
+ last_hidden_state=decoder_outputs[0],
1086
+ hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)
1087
+ if output_hidden_states
1088
+ else None,
1089
+ attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,
1090
+ )
1091
+
1092
+
1093
+ @add_start_docstrings(
1094
+ """
1095
+ Funnel Transformer model with a binary classification head on top as used during pretraining for identifying
1096
+ generated tokens.
1097
+ """,
1098
+ FUNNEL_START_DOCSTRING,
1099
+ )
1100
+
1101
+
1102
+ class FunnelForPreTraining(FunnelPreTrainedModel):
1103
+ def __init__(self, config: FunnelConfig) -> None:
1104
+ super().__init__(config)
1105
+
1106
+ self.funnel = FunnelModel(config)
1107
+ self.discriminator_predictions = FunnelDiscriminatorPredictions(config)
1108
+ # Initialize weights and apply final processing
1109
+ self.post_init()
1110
+
1111
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1112
+ @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1113
+ def forward(
1114
+ self,
1115
+ input_ids: Optional[torch.Tensor] = None,
1116
+ attention_mask: Optional[torch.Tensor] = None,
1117
+ token_type_ids: Optional[torch.Tensor] = None,
1118
+ inputs_embeds: Optional[torch.Tensor] = None,
1119
+ labels: Optional[torch.Tensor] = None,
1120
+ output_attentions: Optional[bool] = None,
1121
+ output_hidden_states: Optional[bool] = None,
1122
+ return_dict: Optional[bool] = None,
1123
+ ) -> Union[Tuple, FunnelForPreTrainingOutput]:
1124
+ r"""
1125
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1126
+ Labels for computing the ELECTRA-style loss. Input should be a sequence of tokens (see `input_ids`
1127
+ docstring). Indices should be in `[0, 1]`:
1128
+
1129
+ - 0 indicates the token is an original token,
1130
+ - 1 indicates the token was replaced.
1131
+
1132
+ Returns:
1133
+
1134
+ Examples:
1135
+
1136
+ ```python
1137
+ >>> from transformers import AutoTokenizer, FunnelForPreTraining
1138
+ >>> import torch
1139
+
1140
+ >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
1141
+ >>> model = FunnelForPreTraining.from_pretrained("funnel-transformer/small")
1142
+
1143
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1144
+ >>> logits = model(**inputs).logits
1145
+ ```"""
1146
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1147
+
1148
+ discriminator_hidden_states = self.funnel(
1149
+ input_ids,
1150
+ attention_mask=attention_mask,
1151
+ token_type_ids=token_type_ids,
1152
+ inputs_embeds=inputs_embeds,
1153
+ output_attentions=output_attentions,
1154
+ output_hidden_states=output_hidden_states,
1155
+ return_dict=return_dict,
1156
+ )
1157
+ discriminator_sequence_output = discriminator_hidden_states[0]
1158
+
1159
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1160
+
1161
+ loss = None
1162
+ if labels is not None:
1163
+ loss_fct = nn.BCEWithLogitsLoss()
1164
+ if attention_mask is not None:
1165
+ active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
1166
+ active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
1167
+ active_labels = labels[active_loss]
1168
+ loss = loss_fct(active_logits, active_labels.float())
1169
+ else:
1170
+ loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
1171
+
1172
+ if not return_dict:
1173
+ output = (logits,) + discriminator_hidden_states[1:]
1174
+ return ((loss,) + output) if loss is not None else output
1175
+
1176
+ return FunnelForPreTrainingOutput(
1177
+ loss=loss,
1178
+ logits=logits,
1179
+ hidden_states=discriminator_hidden_states.hidden_states,
1180
+ attentions=discriminator_hidden_states.attentions,
1181
+ )
1182
+
1183
+
1184
+ @add_start_docstrings("""Funnel Transformer Model with a `language modeling` head on top.""", FUNNEL_START_DOCSTRING)
1185
+ class FunnelForMaskedLM(FunnelPreTrainedModel):
1186
+ _tied_weights_keys = ["lm_head.weight"]
1187
+
1188
+ def __init__(self, config: FunnelConfig) -> None:
1189
+ super().__init__(config)
1190
+
1191
+ self.funnel = FunnelModel(config)
1192
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size)
1193
+
1194
+ # Initialize weights and apply final processing
1195
+ self.post_init()
1196
+
1197
+ def get_output_embeddings(self) -> nn.Linear:
1198
+ return self.lm_head
1199
+
1200
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
1201
+ self.lm_head = new_embeddings
1202
+
1203
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1204
+ @add_code_sample_docstrings(
1205
+ checkpoint=_CHECKPOINT_FOR_DOC,
1206
+ output_type=MaskedLMOutput,
1207
+ config_class=_CONFIG_FOR_DOC,
1208
+ mask="<mask>",
1209
+ )
1210
+ def forward(
1211
+ self,
1212
+ input_ids: Optional[torch.Tensor] = None,
1213
+ attention_mask: Optional[torch.Tensor] = None,
1214
+ token_type_ids: Optional[torch.Tensor] = None,
1215
+ inputs_embeds: Optional[torch.Tensor] = None,
1216
+ labels: Optional[torch.Tensor] = None,
1217
+ output_attentions: Optional[bool] = None,
1218
+ output_hidden_states: Optional[bool] = None,
1219
+ return_dict: Optional[bool] = None,
1220
+ ) -> Union[Tuple, MaskedLMOutput]:
1221
+ r"""
1222
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1223
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1224
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1226
+ """
1227
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1228
+
1229
+ outputs = self.funnel(
1230
+ input_ids,
1231
+ attention_mask=attention_mask,
1232
+ token_type_ids=token_type_ids,
1233
+ inputs_embeds=inputs_embeds,
1234
+ output_attentions=output_attentions,
1235
+ output_hidden_states=output_hidden_states,
1236
+ return_dict=return_dict,
1237
+ )
1238
+
1239
+ last_hidden_state = outputs[0]
1240
+ prediction_logits = self.lm_head(last_hidden_state)
1241
+
1242
+ masked_lm_loss = None
1243
+ if labels is not None:
1244
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1245
+ masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1))
1246
+
1247
+ if not return_dict:
1248
+ output = (prediction_logits,) + outputs[1:]
1249
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1250
+
1251
+ return MaskedLMOutput(
1252
+ loss=masked_lm_loss,
1253
+ logits=prediction_logits,
1254
+ hidden_states=outputs.hidden_states,
1255
+ attentions=outputs.attentions,
1256
+ )
1257
+
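+ # A minimal usage sketch for masked language modeling, assuming the "funnel-transformer/small"
+ # checkpoint and its "<mask>" token:
+ #
+ #     from transformers import AutoTokenizer, FunnelForMaskedLM
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
+ #     model = FunnelForMaskedLM.from_pretrained("funnel-transformer/small")
+ #     inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
+ #     mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
+ #     predicted_id = model(**inputs).logits[0, mask_index].argmax(-1)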
1258
+
1259
+ @add_start_docstrings(
1260
+ """
1261
+ Funnel Transformer Model with a sequence classification/regression head on top (two linear layers on top of the
1262
+ first timestep of the last hidden state) e.g. for GLUE tasks.
1263
+ """,
1264
+ FUNNEL_START_DOCSTRING,
1265
+ )
1266
+ class FunnelForSequenceClassification(FunnelPreTrainedModel):
1267
+ def __init__(self, config: FunnelConfig) -> None:
1268
+ super().__init__(config)
1269
+ self.num_labels = config.num_labels
1270
+ self.config = config
1271
+
1272
+ self.funnel = FunnelBaseModel(config)
1273
+ self.classifier = FunnelClassificationHead(config, config.num_labels)
1274
+ # Initialize weights and apply final processing
1275
+ self.post_init()
1276
+
1277
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1278
+ @add_code_sample_docstrings(
1279
+ checkpoint="funnel-transformer/small-base",
1280
+ output_type=SequenceClassifierOutput,
1281
+ config_class=_CONFIG_FOR_DOC,
1282
+ )
1283
+ def forward(
1284
+ self,
1285
+ input_ids: Optional[torch.Tensor] = None,
1286
+ attention_mask: Optional[torch.Tensor] = None,
1287
+ token_type_ids: Optional[torch.Tensor] = None,
1288
+ inputs_embeds: Optional[torch.Tensor] = None,
1289
+ labels: Optional[torch.Tensor] = None,
1290
+ output_attentions: Optional[bool] = None,
1291
+ output_hidden_states: Optional[bool] = None,
1292
+ return_dict: Optional[bool] = None,
1293
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1294
+ r"""
1295
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1296
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1297
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1298
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1299
+ """
1300
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1301
+
1302
+ outputs = self.funnel(
1303
+ input_ids,
1304
+ attention_mask=attention_mask,
1305
+ token_type_ids=token_type_ids,
1306
+ inputs_embeds=inputs_embeds,
1307
+ output_attentions=output_attentions,
1308
+ output_hidden_states=output_hidden_states,
1309
+ return_dict=return_dict,
1310
+ )
1311
+
1312
+ last_hidden_state = outputs[0]
1313
+ pooled_output = last_hidden_state[:, 0]
1314
+ logits = self.classifier(pooled_output)
1315
+
1316
+ loss = None
1317
+ if labels is not None:
1318
+ if self.config.problem_type is None:
1319
+ if self.num_labels == 1:
1320
+ self.config.problem_type = "regression"
1321
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1322
+ self.config.problem_type = "single_label_classification"
1323
+ else:
1324
+ self.config.problem_type = "multi_label_classification"
1325
+
1326
+ if self.config.problem_type == "regression":
1327
+ loss_fct = MSELoss()
1328
+ if self.num_labels == 1:
1329
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1330
+ else:
1331
+ loss = loss_fct(logits, labels)
1332
+ elif self.config.problem_type == "single_label_classification":
1333
+ loss_fct = CrossEntropyLoss()
1334
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1335
+ elif self.config.problem_type == "multi_label_classification":
1336
+ loss_fct = BCEWithLogitsLoss()
1337
+ loss = loss_fct(logits, labels)
1338
+
1339
+ if not return_dict:
1340
+ output = (logits,) + outputs[1:]
1341
+ return ((loss,) + output) if loss is not None else output
1342
+
1343
+ return SequenceClassifierOutput(
1344
+ loss=loss,
1345
+ logits=logits,
1346
+ hidden_states=outputs.hidden_states,
1347
+ attentions=outputs.attentions,
1348
+ )
1349
+
1350
+
1351
+ @add_start_docstrings(
1352
+ """
1353
+ Funnel Transformer Model with a multiple choice classification head on top (two linear layers on top of the first
1354
+ timestep of the last hidden state, and a softmax) e.g. for RocStories/SWAG tasks.
1355
+ """,
1356
+ FUNNEL_START_DOCSTRING,
1357
+ )
1358
+ class FunnelForMultipleChoice(FunnelPreTrainedModel):
1359
+ def __init__(self, config: FunnelConfig) -> None:
1360
+ super().__init__(config)
1361
+
1362
+ self.funnel = FunnelBaseModel(config)
1363
+ self.classifier = FunnelClassificationHead(config, 1)
1364
+ # Initialize weights and apply final processing
1365
+ self.post_init()
1366
+
1367
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1368
+ @add_code_sample_docstrings(
1369
+ checkpoint="funnel-transformer/small-base",
1370
+ output_type=MultipleChoiceModelOutput,
1371
+ config_class=_CONFIG_FOR_DOC,
1372
+ )
1373
+ def forward(
1374
+ self,
1375
+ input_ids: Optional[torch.Tensor] = None,
1376
+ attention_mask: Optional[torch.Tensor] = None,
1377
+ token_type_ids: Optional[torch.Tensor] = None,
1378
+ inputs_embeds: Optional[torch.Tensor] = None,
1379
+ labels: Optional[torch.Tensor] = None,
1380
+ output_attentions: Optional[bool] = None,
1381
+ output_hidden_states: Optional[bool] = None,
1382
+ return_dict: Optional[bool] = None,
1383
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1384
+ r"""
1385
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1386
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1387
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1388
+ `input_ids` above)
1389
+ """
1390
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1391
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1392
+
1393
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1394
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1395
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1396
+ inputs_embeds = (
1397
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1398
+ if inputs_embeds is not None
1399
+ else None
1400
+ )
1401
+
1402
+ outputs = self.funnel(
1403
+ input_ids,
1404
+ attention_mask=attention_mask,
1405
+ token_type_ids=token_type_ids,
1406
+ inputs_embeds=inputs_embeds,
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ )
1411
+
1412
+ last_hidden_state = outputs[0]
1413
+ pooled_output = last_hidden_state[:, 0]
1414
+ logits = self.classifier(pooled_output)
1415
+ reshaped_logits = logits.view(-1, num_choices)
1416
+
1417
+ loss = None
1418
+ if labels is not None:
1419
+ loss_fct = CrossEntropyLoss()
1420
+ loss = loss_fct(reshaped_logits, labels)
1421
+
1422
+ if not return_dict:
1423
+ output = (reshaped_logits,) + outputs[1:]
1424
+ return ((loss,) + output) if loss is not None else output
1425
+
1426
+ return MultipleChoiceModelOutput(
1427
+ loss=loss,
1428
+ logits=reshaped_logits,
1429
+ hidden_states=outputs.hidden_states,
1430
+ attentions=outputs.attentions,
1431
+ )
1432
+
1433
+
1434
+ @add_start_docstrings(
1435
+ """
1436
+ Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states
1437
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1438
+ """,
1439
+ FUNNEL_START_DOCSTRING,
1440
+ )
1441
+ class FunnelForTokenClassification(FunnelPreTrainedModel):
1442
+ def __init__(self, config: FunnelConfig) -> None:
1443
+ super().__init__(config)
1444
+ self.num_labels = config.num_labels
1445
+
1446
+ self.funnel = FunnelModel(config)
1447
+ self.dropout = nn.Dropout(config.hidden_dropout)
1448
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1449
+
1450
+ # Initialize weights and apply final processing
1451
+ self.post_init()
1452
+
1453
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1454
+ @add_code_sample_docstrings(
1455
+ checkpoint=_CHECKPOINT_FOR_DOC,
1456
+ output_type=TokenClassifierOutput,
1457
+ config_class=_CONFIG_FOR_DOC,
1458
+ )
1459
+ def forward(
1460
+ self,
1461
+ input_ids: Optional[torch.Tensor] = None,
1462
+ attention_mask: Optional[torch.Tensor] = None,
1463
+ token_type_ids: Optional[torch.Tensor] = None,
1464
+ inputs_embeds: Optional[torch.Tensor] = None,
1465
+ labels: Optional[torch.Tensor] = None,
1466
+ output_attentions: Optional[bool] = None,
1467
+ output_hidden_states: Optional[bool] = None,
1468
+ return_dict: Optional[bool] = None,
1469
+ ) -> Union[Tuple, TokenClassifierOutput]:
1470
+ r"""
1471
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1472
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1473
+ """
1474
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1475
+
1476
+ outputs = self.funnel(
1477
+ input_ids,
1478
+ attention_mask=attention_mask,
1479
+ token_type_ids=token_type_ids,
1480
+ inputs_embeds=inputs_embeds,
1481
+ output_attentions=output_attentions,
1482
+ output_hidden_states=output_hidden_states,
1483
+ return_dict=return_dict,
1484
+ )
1485
+
1486
+ last_hidden_state = outputs[0]
1487
+ last_hidden_state = self.dropout(last_hidden_state)
1488
+ logits = self.classifier(last_hidden_state)
1489
+
1490
+ loss = None
1491
+ if labels is not None:
1492
+ loss_fct = CrossEntropyLoss()
1493
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1494
+
1495
+ if not return_dict:
1496
+ output = (logits,) + outputs[1:]
1497
+ return ((loss,) + output) if loss is not None else output
1498
+
1499
+ return TokenClassifierOutput(
1500
+ loss=loss,
1501
+ logits=logits,
1502
+ hidden_states=outputs.hidden_states,
1503
+ attentions=outputs.attentions,
1504
+ )
1505
+
1506
+
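A minimal usage sketch for the token classification head above; the checkpoint name and `num_labels` value are assumptions chosen for illustration, not something this diff pins down:

    import torch
    from transformers import AutoTokenizer, FunnelForTokenClassification

    tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
    model = FunnelForTokenClassification.from_pretrained("funnel-transformer/small", num_labels=5)

    inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits        # (1, seq_len, num_labels)
    predicted_ids = logits.argmax(dim=-1)      # one predicted label id per token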
1507
+ @add_start_docstrings(
1508
+ """
1509
+ Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD
1510
+ (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1511
+ """,
1512
+ FUNNEL_START_DOCSTRING,
1513
+ )
1514
+ class FunnelForQuestionAnswering(FunnelPreTrainedModel):
1515
+ def __init__(self, config: FunnelConfig) -> None:
1516
+ super().__init__(config)
1517
+ self.num_labels = config.num_labels
1518
+
1519
+ self.funnel = FunnelModel(config)
1520
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1521
+
1522
+ # Initialize weights and apply final processing
1523
+ self.post_init()
1524
+
1525
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1526
+ @add_code_sample_docstrings(
1527
+ checkpoint=_CHECKPOINT_FOR_DOC,
1528
+ output_type=QuestionAnsweringModelOutput,
1529
+ config_class=_CONFIG_FOR_DOC,
1530
+ )
1531
+ def forward(
1532
+ self,
1533
+ input_ids: Optional[torch.Tensor] = None,
1534
+ attention_mask: Optional[torch.Tensor] = None,
1535
+ token_type_ids: Optional[torch.Tensor] = None,
1536
+ inputs_embeds: Optional[torch.Tensor] = None,
1537
+ start_positions: Optional[torch.Tensor] = None,
1538
+ end_positions: Optional[torch.Tensor] = None,
1539
+ output_attentions: Optional[bool] = None,
1540
+ output_hidden_states: Optional[bool] = None,
1541
+ return_dict: Optional[bool] = None,
1542
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1543
+ r"""
1544
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1545
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1546
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1547
+ are not taken into account for computing the loss.
1548
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1549
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1550
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1551
+ are not taken into account for computing the loss.
1552
+ """
1553
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1554
+
1555
+ outputs = self.funnel(
1556
+ input_ids,
1557
+ attention_mask=attention_mask,
1558
+ token_type_ids=token_type_ids,
1559
+ inputs_embeds=inputs_embeds,
1560
+ output_attentions=output_attentions,
1561
+ output_hidden_states=output_hidden_states,
1562
+ return_dict=return_dict,
1563
+ )
1564
+
1565
+ last_hidden_state = outputs[0]
1566
+
1567
+ logits = self.qa_outputs(last_hidden_state)
1568
+ start_logits, end_logits = logits.split(1, dim=-1)
1569
+ start_logits = start_logits.squeeze(-1).contiguous()
1570
+ end_logits = end_logits.squeeze(-1).contiguous()
1571
+
1572
+ total_loss = None
1573
+ if start_positions is not None and end_positions is not None:
1574
+ # If we are on multi-GPU, split adds a dimension
1575
+ if len(start_positions.size()) > 1:
1576
+ start_positions = start_positions.squeeze(-1)
1577
+ if len(end_positions.size()) > 1:
1578
+ end_positions = end_positions.squeeze(-1)
1579
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1580
+ ignored_index = start_logits.size(1)
1581
+ start_positions = start_positions.clamp(0, ignored_index)
1582
+ end_positions = end_positions.clamp(0, ignored_index)
1583
+
1584
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1585
+ start_loss = loss_fct(start_logits, start_positions)
1586
+ end_loss = loss_fct(end_logits, end_positions)
1587
+ total_loss = (start_loss + end_loss) / 2
1588
+
1589
+ if not return_dict:
1590
+ output = (start_logits, end_logits) + outputs[1:]
1591
+ return ((total_loss,) + output) if total_loss is not None else output
1592
+
1593
+ return QuestionAnsweringModelOutput(
1594
+ loss=total_loss,
1595
+ start_logits=start_logits,
1596
+ end_logits=end_logits,
1597
+ hidden_states=outputs.hidden_states,
1598
+ attentions=outputs.attentions,
1599
+ )
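For completeness, a hedged sketch of how the start/end logits returned above are typically turned into an answer span; the checkpoint name is an assumption, and any Funnel QA checkpoint would be used the same way:

    import torch
    from transformers import AutoTokenizer, FunnelForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
    model = FunnelForQuestionAnswering.from_pretrained("funnel-transformer/small")

    question = "Who proposed the Funnel Transformer?"
    context = "The Funnel Transformer was proposed by Dai, Lai, Yang and Le."
    inputs = tokenizer(question, context, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Greedy span selection: take the argmax of the start and end logits and decode the tokens in between.
    start = int(outputs.start_logits.argmax())
    end = int(outputs.end_logits.argmax())
    answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])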
venv/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py ADDED
@@ -0,0 +1,1871 @@
1
+ # coding=utf-8
2
+ # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Funnel model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import tensorflow as tf
26
+
27
+ from ...activations_tf import get_tf_activation
28
+ from ...modeling_tf_outputs import (
29
+ TFBaseModelOutput,
30
+ TFMaskedLMOutput,
31
+ TFMultipleChoiceModelOutput,
32
+ TFQuestionAnsweringModelOutput,
33
+ TFSequenceClassifierOutput,
34
+ TFTokenClassifierOutput,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFMaskedLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFMultipleChoiceLoss,
40
+ TFPreTrainedModel,
41
+ TFQuestionAnsweringLoss,
42
+ TFSequenceClassificationLoss,
43
+ TFTokenClassificationLoss,
44
+ get_initializer,
45
+ keras,
46
+ keras_serializable,
47
+ unpack_inputs,
48
+ )
49
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
50
+ from ...utils import (
51
+ ModelOutput,
52
+ add_code_sample_docstrings,
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ logging,
56
+ replace_return_docstrings,
57
+ )
58
+ from .configuration_funnel import FunnelConfig
59
+
60
+
61
+ logger = logging.get_logger(__name__)
62
+
63
+ _CONFIG_FOR_DOC = "FunnelConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ INF = 1e6
70
+
71
+
72
+ class TFFunnelEmbeddings(keras.layers.Layer):
73
+ """Construct the embeddings from word, position and token_type embeddings."""
74
+
75
+ def __init__(self, config, **kwargs):
76
+ super().__init__(**kwargs)
77
+
78
+ self.config = config
79
+ self.hidden_size = config.hidden_size
80
+ self.initializer_std = 1.0 if config.initializer_std is None else config.initializer_std
81
+
82
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
83
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout)
84
+
85
+ def build(self, input_shape=None):
86
+ with tf.name_scope("word_embeddings"):
87
+ self.weight = self.add_weight(
88
+ name="weight",
89
+ shape=[self.config.vocab_size, self.hidden_size],
90
+ initializer=get_initializer(initializer_range=self.initializer_std),
91
+ )
92
+
93
+ if self.built:
94
+ return
95
+ self.built = True
96
+ if getattr(self, "LayerNorm", None) is not None:
97
+ with tf.name_scope(self.LayerNorm.name):
98
+ self.LayerNorm.build([None, None, self.config.d_model])
99
+
100
+ def call(self, input_ids=None, inputs_embeds=None, training=False):
101
+ """
102
+ Applies embedding based on inputs tensor.
103
+
104
+ Returns:
105
+ final_embeddings (`tf.Tensor`): output embedding tensor.
106
+ """
107
+ assert not (input_ids is None and inputs_embeds is None)
108
+ assert not (input_ids is not None and inputs_embeds is not None)
109
+
110
+ if input_ids is not None:
111
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
112
+ inputs_embeds = tf.gather(self.weight, input_ids)
113
+
114
+ final_embeddings = self.LayerNorm(inputs=inputs_embeds)
115
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
116
+
117
+ return final_embeddings
118
+
119
+
120
+ class TFFunnelAttentionStructure:
121
+ """
122
+ Contains helpers for `TFFunnelRelMultiheadAttention`.
123
+ """
124
+
125
+ cls_token_type_id: int = 2
126
+
127
+ def __init__(self, config):
128
+ self.d_model = config.d_model
129
+ self.attention_type = config.attention_type
130
+ self.num_blocks = config.num_blocks
131
+ self.separate_cls = config.separate_cls
132
+ self.truncate_seq = config.truncate_seq
133
+ self.pool_q_only = config.pool_q_only
134
+ self.pooling_type = config.pooling_type
135
+
136
+ self.sin_dropout = keras.layers.Dropout(config.hidden_dropout)
137
+ self.cos_dropout = keras.layers.Dropout(config.hidden_dropout)
138
+ # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was
139
+ # divided.
140
+ self.pooling_mult = None
141
+
142
+ def init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None, training=False):
143
+ """Returns the attention inputs associated to the inputs of the model."""
144
+ # inputs_embeds has shape batch_size x seq_len x d_model
145
+ # attention_mask and token_type_ids have shape batch_size x seq_len
146
+ self.pooling_mult = 1
147
+ self.seq_len = seq_len = shape_list(inputs_embeds)[1]
148
+ position_embeds = self.get_position_embeds(seq_len, training=training)
149
+ token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
150
+ cls_mask = (
151
+ tf.pad(tf.ones([seq_len - 1, seq_len - 1], dtype=inputs_embeds.dtype), [[1, 0], [1, 0]])
152
+ if self.separate_cls
153
+ else None
154
+ )
155
+ return (position_embeds, token_type_mat, attention_mask, cls_mask)
156
+
157
+ def token_type_ids_to_mat(self, token_type_ids):
158
+ """Convert `token_type_ids` to `token_type_mat`."""
159
+ token_type_mat = tf.equal(tf.expand_dims(token_type_ids, -1), tf.expand_dims(token_type_ids, -2))
160
+ # Treat <cls> as in the same segment as both A & B
161
+ cls_ids = tf.equal(token_type_ids, tf.constant([self.cls_token_type_id], dtype=token_type_ids.dtype))
162
+ cls_mat = tf.logical_or(tf.expand_dims(cls_ids, -1), tf.expand_dims(cls_ids, -2))
163
+ return tf.logical_or(cls_mat, token_type_mat)
164
+
165
+ def get_position_embeds(self, seq_len, training=False):
166
+ """
167
+ Create and cache inputs related to relative position encoding. Those are very different depending on whether we
168
+ are using the factorized or the relative shift attention:
169
+
170
+ For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,
171
+ final formula.
172
+
173
+ For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final
174
+ formula.
175
+
176
+ Paper link: https://arxiv.org/abs/2006.03236
177
+ """
178
+ if self.attention_type == "factorized":
179
+ # Notations from the paper, appendix A.2.2, final formula.
180
+ # We need to create and return the matrices phi, psi, pi and omega.
181
+ pos_seq = tf.range(0, seq_len, 1.0)
182
+ freq_seq = tf.range(0, self.d_model // 2, 1.0)
183
+ inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2)))
184
+ sinusoid = tf.einsum("i,d->id", pos_seq, inv_freq)
185
+
186
+ sin_embed = tf.sin(sinusoid)
187
+ sin_embed_d = self.sin_dropout(sin_embed, training=training)
188
+ cos_embed = tf.cos(sinusoid)
189
+ cos_embed_d = self.cos_dropout(cos_embed, training=training)
190
+ # This is different from the formula in the paper...
191
+ phi = tf.concat([sin_embed_d, sin_embed_d], axis=-1)
192
+ psi = tf.concat([cos_embed, sin_embed], axis=-1)
193
+ pi = tf.concat([cos_embed_d, cos_embed_d], axis=-1)
194
+ omega = tf.concat([-sin_embed, cos_embed], axis=-1)
195
+ return (phi, pi, psi, omega)
196
+ else:
197
+ # Notations from the paper, appendix A.2.1, final formula.
198
+ # We need to create and return all the possible vectors R for all blocks and shifts.
199
+ freq_seq = tf.range(0, self.d_model // 2, 1.0)
200
+ inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2)))
201
+ # Maximum relative positions for the first input
202
+ rel_pos_id = tf.range(-seq_len * 2, seq_len * 2, 1.0)
203
+ zero_offset = seq_len * tf.constant(2)
204
+ sinusoid = tf.einsum("i,d->id", rel_pos_id, inv_freq)
205
+ sin_embed = self.sin_dropout(tf.sin(sinusoid), training=training)
206
+ cos_embed = self.cos_dropout(tf.cos(sinusoid), training=training)
207
+ pos_embed = tf.concat([sin_embed, cos_embed], axis=-1)
208
+
209
+ pos = tf.range(0, seq_len)
210
+ pooled_pos = pos
211
+ position_embeds_list = []
212
+ for block_index in range(0, self.num_blocks):
213
+ # For each block with block_index > 0, we need two types of position embeddings:
214
+ # - Attention(pooled-q, unpooled-kv)
215
+ # - Attention(pooled-q, pooled-kv)
216
+ # For block_index = 0 we only need the second one and leave the first one as None.
217
+
218
+ # First type
219
+ position_embeds_pooling = tf.fill([1], value=-1.0)
220
+
221
+ if block_index != 0:
222
+ pooled_pos = self.stride_pool_pos(pos, block_index)
223
+
224
+ # construct rel_pos_id
225
+ stride = 2 ** (block_index - 1)
226
+ rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
227
+ # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset
228
+ # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model))
229
+ rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype)
230
+ rel_pos = rel_pos + zero_offset
231
+ position_embeds_pooling = tf.gather(pos_embed, rel_pos, axis=0)
232
+
233
+ # Second type
234
+ pos = pooled_pos
235
+ stride = 2**block_index
236
+ rel_pos = self.relative_pos(pos, stride)
237
+
238
+ # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset
239
+ # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model))
240
+ rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype)
241
+ rel_pos = rel_pos + zero_offset
242
+ tf.debugging.assert_less(rel_pos, tf.shape(pos_embed)[0])
243
+ position_embeds_no_pooling = tf.gather(pos_embed, rel_pos, axis=0)
244
+
245
+ position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
246
+ return position_embeds_list
247
+
248
+ def stride_pool_pos(self, pos_id, block_index):
249
+ """
250
+ Pool `pos_id` while keeping the cls token separate (if `self.separate_cls=True`).
251
+ """
252
+ if self.separate_cls:
253
+ # Under separate <cls>, we treat the <cls> as the first token in
254
+ # the previous block of the 1st real block. Since the 1st real
255
+ # block always has position 1, the position of the previous block
256
+ # will be at `1 - 2 ** block_index`.
257
+ cls_pos = tf.constant([-(2**block_index) + 1], dtype=pos_id.dtype)
258
+ pooled_pos_id = pos_id[1:-1] if self.truncate_seq else pos_id[1:]
259
+ return tf.concat([cls_pos, pooled_pos_id[::2]], 0)
260
+ else:
261
+ return pos_id[::2]
262
+
263
+ def relative_pos(self, pos, stride, pooled_pos=None, shift=1):
264
+ """
265
+ Build the relative positional vector between `pos` and `pooled_pos`.
266
+ """
267
+ if pooled_pos is None:
268
+ pooled_pos = pos
269
+
270
+ ref_point = pooled_pos[0] - pos[0]
271
+ num_remove = shift * shape_list(pooled_pos)[0]
272
+ max_dist = ref_point + num_remove * stride
273
+ min_dist = pooled_pos[0] - pos[-1]
274
+
275
+ return tf.range(max_dist, min_dist - 1, -stride)
276
+
277
+ def stride_pool(self, tensor, axis):
278
+ """
279
+ Perform pooling by stride slicing the tensor along the given axis.
280
+ """
281
+ if tensor is None:
282
+ return None
283
+
284
+ # Do the stride pool recursively if axis is a list or a tuple of ints.
285
+ if isinstance(axis, (list, tuple)):
286
+ for ax in axis:
287
+ tensor = self.stride_pool(tensor, ax)
288
+ return tensor
289
+
290
+ # Do the stride pool recursively if tensor is a list or tuple of tensors.
291
+ if isinstance(tensor, (tuple, list)):
292
+ return type(tensor)(self.stride_pool(x, axis) for x in tensor)
293
+
294
+ # Deal with negative axis
295
+ axis %= len(shape_list(tensor))
296
+
297
+ axis_slice = slice(None, -1, 2) if self.separate_cls and self.truncate_seq else slice(None, None, 2)
298
+ enc_slice = [slice(None)] * axis + [axis_slice]
299
+ if self.separate_cls:
300
+ cls_slice = [slice(None)] * axis + [slice(None, 1)]
301
+ tensor = tf.concat([tensor[cls_slice], tensor], axis)
302
+ return tensor[enc_slice]
303
+
304
+ def pool_tensor(self, tensor, mode="mean", stride=2):
305
+ """Apply 1D pooling to a tensor of size [B x T (x H)]."""
306
+ if tensor is None:
307
+ return None
308
+
309
+ # Do the pool recursively if tensor is a list or tuple of tensors.
310
+ if isinstance(tensor, (tuple, list)):
311
+ return type(tensor)(self.pool_tensor(tensor, mode=mode, stride=stride) for x in tensor)
312
+
313
+ if self.separate_cls:
314
+ suffix = tensor[:, :-1] if self.truncate_seq else tensor
315
+ tensor = tf.concat([tensor[:, :1], suffix], axis=1)
316
+
317
+ ndim = len(shape_list(tensor))
318
+ if ndim == 2:
319
+ tensor = tensor[:, :, None]
320
+
321
+ if mode == "mean":
322
+ tensor = tf.nn.avg_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
323
+ elif mode == "max":
324
+ tensor = tf.nn.max_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
325
+ elif mode == "min":
326
+ tensor = -tf.nn.max_pool1d(-tensor, stride, strides=stride, data_format="NWC", padding="SAME")
327
+ else:
328
+ raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
329
+
330
+ return tf.squeeze(tensor, 2) if ndim == 2 else tensor
331
+
332
+ def pre_attention_pooling(self, output, attention_inputs):
333
+ """Pool `output` and the proper parts of `attention_inputs` before the attention layer."""
334
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
335
+ if self.pool_q_only:
336
+ if self.attention_type == "factorized":
337
+ position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
338
+ token_type_mat = self.stride_pool(token_type_mat, 1)
339
+ cls_mask = self.stride_pool(cls_mask, 0)
340
+ output = self.pool_tensor(output, mode=self.pooling_type)
341
+ else:
342
+ self.pooling_mult *= 2
343
+ if self.attention_type == "factorized":
344
+ position_embeds = self.stride_pool(position_embeds, 0)
345
+ token_type_mat = self.stride_pool(token_type_mat, [1, 2])
346
+ cls_mask = self.stride_pool(cls_mask, [1, 2])
347
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
348
+ output = self.pool_tensor(output, mode=self.pooling_type)
349
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
350
+ return output, attention_inputs
351
+
352
+ def post_attention_pooling(self, attention_inputs):
353
+ """Pool the proper parts of `attention_inputs` after the attention layer."""
354
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
355
+ if self.pool_q_only:
356
+ self.pooling_mult *= 2
357
+ if self.attention_type == "factorized":
358
+ position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
359
+ token_type_mat = self.stride_pool(token_type_mat, 2)
360
+ cls_mask = self.stride_pool(cls_mask, 1)
361
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
362
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
363
+ return attention_inputs
364
+
365
+
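A small numeric sketch of the mean pooling performed by `pool_tensor` above (a made-up tensor; it only shows the stride-2 halving of the sequence length, not the separate-<cls> handling):

    import tensorflow as tf

    t = tf.reshape(tf.range(6, dtype=tf.float32), (1, 6, 1))   # (batch=1, seq_len=6, d=1)
    pooled = tf.nn.avg_pool1d(t, 2, strides=2, padding="SAME", data_format="NWC")
    # seq_len 6 -> 3, averaging adjacent pairs: [[0.5], [2.5], [4.5]]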
366
+ def _relative_shift_gather(positional_attn, context_len, shift):
367
+ batch_size, n_head, seq_len, max_rel_len = shape_list(positional_attn)
368
+ # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i-j
369
+
370
+ # What's next is the same as doing the following gather in PyTorch, which might be clearer code but less efficient.
371
+ # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
372
+ # # matrix of context_len + i-j
373
+ # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
374
+
375
+ positional_attn = tf.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
376
+ positional_attn = positional_attn[:, :, shift:, :]
377
+ positional_attn = tf.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
378
+ positional_attn = positional_attn[..., :context_len]
379
+ return positional_attn
380
+
381
+
382
+ class TFFunnelRelMultiheadAttention(keras.layers.Layer):
383
+ def __init__(self, config, block_index, **kwargs):
384
+ super().__init__(**kwargs)
385
+ self.attention_type = config.attention_type
386
+ self.n_head = n_head = config.n_head
387
+ self.d_head = d_head = config.d_head
388
+ self.d_model = d_model = config.d_model
389
+ self.initializer_range = config.initializer_range
390
+ self.block_index = block_index
391
+
392
+ self.hidden_dropout = keras.layers.Dropout(config.hidden_dropout)
393
+ self.attention_dropout = keras.layers.Dropout(config.attention_dropout)
394
+
395
+ initializer = get_initializer(config.initializer_range)
396
+
397
+ self.q_head = keras.layers.Dense(
398
+ n_head * d_head, use_bias=False, kernel_initializer=initializer, name="q_head"
399
+ )
400
+ self.k_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="k_head")
401
+ self.v_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="v_head")
402
+
403
+ self.post_proj = keras.layers.Dense(d_model, kernel_initializer=initializer, name="post_proj")
404
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
405
+ self.scale = 1.0 / (d_head**0.5)
406
+
407
+ def build(self, input_shape=None):
408
+ n_head, d_head, d_model = self.n_head, self.d_head, self.d_model
409
+ initializer = get_initializer(self.initializer_range)
410
+
411
+ self.r_w_bias = self.add_weight(
412
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_w_bias"
413
+ )
414
+ self.r_r_bias = self.add_weight(
415
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_r_bias"
416
+ )
417
+ self.r_kernel = self.add_weight(
418
+ shape=(d_model, n_head, d_head), initializer=initializer, trainable=True, name="r_kernel"
419
+ )
420
+ self.r_s_bias = self.add_weight(
421
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_s_bias"
422
+ )
423
+ self.seg_embed = self.add_weight(
424
+ shape=(2, n_head, d_head), initializer=initializer, trainable=True, name="seg_embed"
425
+ )
426
+
427
+ if self.built:
428
+ return
429
+ self.built = True
430
+ if getattr(self, "q_head", None) is not None:
431
+ with tf.name_scope(self.q_head.name):
432
+ self.q_head.build([None, None, d_model])
433
+ if getattr(self, "k_head", None) is not None:
434
+ with tf.name_scope(self.k_head.name):
435
+ self.k_head.build([None, None, d_model])
436
+ if getattr(self, "v_head", None) is not None:
437
+ with tf.name_scope(self.v_head.name):
438
+ self.v_head.build([None, None, d_model])
439
+ if getattr(self, "post_proj", None) is not None:
440
+ with tf.name_scope(self.post_proj.name):
441
+ self.post_proj.build([None, None, n_head * d_head])
442
+ if getattr(self, "layer_norm", None) is not None:
443
+ with tf.name_scope(self.layer_norm.name):
444
+ self.layer_norm.build([None, None, d_model])
445
+
446
+ def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
447
+ """Relative attention score for the positional encodings"""
448
+ # q_head has shape batch_size x seq_len x n_head x d_head
449
+ if self.attention_type == "factorized":
450
+ # Notations from the paper, appendix A.2.2, final formula (https://arxiv.org/abs/2006.03236)
451
+ # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model
452
+ phi, pi, psi, omega = position_embeds
453
+ # Shape n_head x d_head
454
+ u = self.r_r_bias * self.scale
455
+ # Shape d_model x n_head x d_head
456
+ w_r = self.r_kernel
457
+
458
+ # Shape batch_size x seq_len x n_head x d_model
459
+ q_r_attention = tf.einsum("binh,dnh->bind", q_head + u, w_r)
460
+ q_r_attention_1 = q_r_attention * phi[:, None]
461
+ q_r_attention_2 = q_r_attention * pi[:, None]
462
+
463
+ # Shape batch_size x n_head x seq_len x context_len
464
+ positional_attn = tf.einsum("bind,jd->bnij", q_r_attention_1, psi) + tf.einsum(
465
+ "bind,jd->bnij", q_r_attention_2, omega
466
+ )
467
+ else:
468
+ # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
469
+ # Grab the proper positional encoding, shape max_rel_len x d_model
470
+ if shape_list(q_head)[1] != context_len:
471
+ shift = 2
472
+ r = position_embeds[self.block_index][1]
473
+ else:
474
+ shift = 1
475
+ r = position_embeds[self.block_index][0]
476
+ # Shape n_head x d_head
477
+ v = self.r_r_bias * self.scale
478
+ # Shape d_model x n_head x d_head
479
+ w_r = self.r_kernel
480
+
481
+ # Shape max_rel_len x n_head x d_model
482
+ r_head = tf.einsum("td,dnh->tnh", r, w_r)
483
+ # Shape batch_size x n_head x seq_len x max_rel_len
484
+ positional_attn = tf.einsum("binh,tnh->bnit", q_head + v, r_head)
485
+ # Shape batch_size x n_head x seq_len x context_len
486
+ positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
487
+
488
+ if cls_mask is not None:
489
+ positional_attn *= cls_mask
490
+ return positional_attn
491
+
492
+ def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
493
+ """Relative attention score for the token_type_ids"""
494
+ if token_type_mat is None:
495
+ return 0
496
+ batch_size, seq_len, context_len = shape_list(token_type_mat)
497
+ # q_head has shape batch_size x seq_len x n_head x d_head
498
+ # Shape n_head x d_head
499
+ r_s_bias = self.r_s_bias * self.scale
500
+
501
+ # Shape batch_size x n_head x seq_len x 2
502
+ token_type_bias = tf.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
503
+ # Shape batch_size x n_head x seq_len x context_len
504
+ token_type_mat = tf.tile(token_type_mat[:, None], [1, shape_list(q_head)[2], 1, 1])
505
+ # token_type_mat = tf.broadcast_to(token_type_mat[:, None], new_shape)
506
+ # Shapes batch_size x n_head x seq_len
507
+ diff_token_type, same_token_type = tf.split(token_type_bias, 2, axis=-1)
508
+ # Shape batch_size x n_head x seq_len x context_len
509
+ token_type_attn = tf.where(
510
+ token_type_mat,
511
+ tf.tile(same_token_type, [1, 1, 1, context_len]),
512
+ tf.tile(diff_token_type, [1, 1, 1, context_len]),
513
+ )
514
+
515
+ if cls_mask is not None:
516
+ token_type_attn *= cls_mask
517
+ return token_type_attn
518
+
519
+ def call(self, query, key, value, attention_inputs, output_attentions=False, training=False):
520
+ # query has shape batch_size x seq_len x d_model
521
+ # key and value have shapes batch_size x context_len x d_model
522
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
523
+
524
+ batch_size, seq_len, _ = shape_list(query)
525
+ context_len = shape_list(key)[1]
526
+ n_head, d_head = self.n_head, self.d_head
527
+
528
+ # Shape batch_size x seq_len x n_head x d_head
529
+ q_head = tf.reshape(self.q_head(query), [batch_size, seq_len, n_head, d_head])
530
+ # Shapes batch_size x context_len x n_head x d_head
531
+ k_head = tf.reshape(self.k_head(key), [batch_size, context_len, n_head, d_head])
532
+ v_head = tf.reshape(self.v_head(value), [batch_size, context_len, n_head, d_head])
533
+
534
+ q_head = q_head * self.scale
535
+ # Shape n_head x d_head
536
+ r_w_bias = self.r_w_bias * self.scale
537
+ # Shapes batch_size x n_head x seq_len x context_len
538
+ content_score = tf.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
539
+ positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)
540
+ token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
541
+
542
+ # merge attention scores
543
+ attn_score = content_score + positional_attn + token_type_attn
544
+
545
+ # perform masking
546
+ if attention_mask is not None:
547
+ attention_mask = tf.cast(attention_mask, dtype=attn_score.dtype)
548
+ attn_score = attn_score - (INF * (1 - attention_mask[:, None, None]))
549
+
550
+ # attention probability
551
+ attn_prob = stable_softmax(attn_score, axis=-1)
552
+ attn_prob = self.attention_dropout(attn_prob, training=training)
553
+
554
+ # attention output, shape batch_size x seq_len x n_head x d_head
555
+ attn_vec = tf.einsum("bnij,bjnd->bind", attn_prob, v_head)
556
+
557
+ # Shape batch_size x seq_len x d_model
558
+ attn_out = self.post_proj(tf.reshape(attn_vec, [batch_size, seq_len, n_head * d_head]))
559
+ attn_out = self.hidden_dropout(attn_out, training=training)
560
+
561
+ output = self.layer_norm(query + attn_out)
562
+ return (output, attn_prob) if output_attentions else (output,)
563
+
564
+
565
+ class TFFunnelPositionwiseFFN(keras.layers.Layer):
566
+ def __init__(self, config, **kwargs):
567
+ super().__init__(**kwargs)
568
+ initializer = get_initializer(config.initializer_range)
569
+ self.linear_1 = keras.layers.Dense(config.d_inner, kernel_initializer=initializer, name="linear_1")
570
+ self.activation_function = get_tf_activation(config.hidden_act)
571
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
572
+ self.linear_2 = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_2")
573
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
574
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
575
+ self.config = config
576
+
577
+ def call(self, hidden, training=False):
578
+ h = self.linear_1(hidden)
579
+ h = self.activation_function(h)
580
+ h = self.activation_dropout(h, training=training)
581
+ h = self.linear_2(h)
582
+ h = self.dropout(h, training=training)
583
+ return self.layer_norm(hidden + h)
584
+
585
+ def build(self, input_shape=None):
586
+ if self.built:
587
+ return
588
+ self.built = True
589
+ if getattr(self, "linear_1", None) is not None:
590
+ with tf.name_scope(self.linear_1.name):
591
+ self.linear_1.build([None, None, self.config.d_model])
592
+ if getattr(self, "linear_2", None) is not None:
593
+ with tf.name_scope(self.linear_2.name):
594
+ self.linear_2.build([None, None, self.config.d_inner])
595
+ if getattr(self, "layer_norm", None) is not None:
596
+ with tf.name_scope(self.layer_norm.name):
597
+ self.layer_norm.build([None, None, self.config.d_model])
598
+
599
+
600
+ class TFFunnelLayer(keras.layers.Layer):
601
+ def __init__(self, config, block_index, **kwargs):
602
+ super().__init__(**kwargs)
603
+ self.attention = TFFunnelRelMultiheadAttention(config, block_index, name="attention")
604
+ self.ffn = TFFunnelPositionwiseFFN(config, name="ffn")
605
+
606
+ def call(self, query, key, value, attention_inputs, output_attentions=False, training=False):
607
+ attn = self.attention(
608
+ query, key, value, attention_inputs, output_attentions=output_attentions, training=training
609
+ )
610
+ output = self.ffn(attn[0], training=training)
611
+ return (output, attn[1]) if output_attentions else (output,)
612
+
613
+ def build(self, input_shape=None):
614
+ if self.built:
615
+ return
616
+ self.built = True
617
+ if getattr(self, "attention", None) is not None:
618
+ with tf.name_scope(self.attention.name):
619
+ self.attention.build(None)
620
+ if getattr(self, "ffn", None) is not None:
621
+ with tf.name_scope(self.ffn.name):
622
+ self.ffn.build(None)
623
+
624
+
625
+ class TFFunnelEncoder(keras.layers.Layer):
626
+ def __init__(self, config, **kwargs):
627
+ super().__init__(**kwargs)
628
+ self.separate_cls = config.separate_cls
629
+ self.pool_q_only = config.pool_q_only
630
+ self.block_repeats = config.block_repeats
631
+ self.attention_structure = TFFunnelAttentionStructure(config)
632
+ self.blocks = [
633
+ [TFFunnelLayer(config, block_index, name=f"blocks_._{block_index}_._{i}") for i in range(block_size)]
634
+ for block_index, block_size in enumerate(config.block_sizes)
635
+ ]
636
+
637
+ def call(
638
+ self,
639
+ inputs_embeds,
640
+ attention_mask=None,
641
+ token_type_ids=None,
642
+ output_attentions=False,
643
+ output_hidden_states=False,
644
+ return_dict=True,
645
+ training=False,
646
+ ):
647
+ # The pooling is not implemented on long tensors, so we convert this mask.
648
+ # attention_mask = tf.cast(attention_mask, inputs_embeds.dtype)
649
+ attention_inputs = self.attention_structure.init_attention_inputs(
650
+ inputs_embeds,
651
+ attention_mask=attention_mask,
652
+ token_type_ids=token_type_ids,
653
+ training=training,
654
+ )
655
+ hidden = inputs_embeds
656
+
657
+ all_hidden_states = (inputs_embeds,) if output_hidden_states else None
658
+ all_attentions = () if output_attentions else None
659
+
660
+ for block_index, block in enumerate(self.blocks):
661
+ pooling_flag = shape_list(hidden)[1] > (2 if self.separate_cls else 1)
662
+ pooling_flag = pooling_flag and block_index > 0
663
+ pooled_hidden = tf.zeros(shape_list(hidden))
664
+
665
+ if pooling_flag:
666
+ pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(
667
+ hidden, attention_inputs
668
+ )
669
+
670
+ for layer_index, layer in enumerate(block):
671
+ for repeat_index in range(self.block_repeats[block_index]):
672
+ do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
673
+ if do_pooling:
674
+ query = pooled_hidden
675
+ key = value = hidden if self.pool_q_only else pooled_hidden
676
+ else:
677
+ query = key = value = hidden
678
+ layer_output = layer(
679
+ query, key, value, attention_inputs, output_attentions=output_attentions, training=training
680
+ )
681
+ hidden = layer_output[0]
682
+ if do_pooling:
683
+ attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)
684
+
685
+ if output_attentions:
686
+ all_attentions = all_attentions + layer_output[1:]
687
+ if output_hidden_states:
688
+ all_hidden_states = all_hidden_states + (hidden,)
689
+
690
+ if not return_dict:
691
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
692
+ return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
693
+
694
+ def build(self, input_shape=None):
695
+ if self.built:
696
+ return
697
+ self.built = True
698
+ for block in self.blocks:
699
+ for layer in block:
700
+ with tf.name_scope(layer.name):
701
+ layer.build(None)
702
+
703
+
704
+ def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False):
705
+ """
706
+ Upsample tensor `x` to match `target_len` by repeating the tokens `stride` times on the sequence length dimension.
707
+ """
708
+ if stride == 1:
709
+ return x
710
+ if separate_cls:
711
+ cls = x[:, :1]
712
+ x = x[:, 1:]
713
+ output = tf.repeat(x, repeats=stride, axis=1)
714
+ if separate_cls:
715
+ if truncate_seq:
716
+ output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]])
717
+ output = output[:, : target_len - 1]
718
+ output = tf.concat([cls, output], axis=1)
719
+ else:
720
+ output = output[:, :target_len]
721
+ return output
722
+
723
+
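A minimal numeric sketch of `upsample` with `separate_cls=True` (a made-up tensor; it assumes the `upsample` function defined above is in scope):

    import tensorflow as tf

    x = tf.constant([[[0.0], [1.0], [2.0], [3.0]]])   # (batch=1, seq_len=4, d=1); position 0 plays the role of <cls>
    y = upsample(x, stride=2, target_len=7, separate_cls=True)
    # <cls> is kept once, every other token is repeated `stride` times, and the result is cut to target_len:
    # [[0.], [1.], [1.], [2.], [2.], [3.], [3.]]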
724
+ class TFFunnelDecoder(keras.layers.Layer):
725
+ def __init__(self, config, **kwargs):
726
+ super().__init__(**kwargs)
727
+ self.separate_cls = config.separate_cls
728
+ self.truncate_seq = config.truncate_seq
729
+ self.stride = 2 ** (len(config.block_sizes) - 1)
730
+ self.attention_structure = TFFunnelAttentionStructure(config)
731
+ self.layers = [TFFunnelLayer(config, 0, name=f"layers_._{i}") for i in range(config.num_decoder_layers)]
732
+
733
+ def call(
734
+ self,
735
+ final_hidden,
736
+ first_block_hidden,
737
+ attention_mask=None,
738
+ token_type_ids=None,
739
+ output_attentions=False,
740
+ output_hidden_states=False,
741
+ return_dict=True,
742
+ training=False,
743
+ ):
744
+ upsampled_hidden = upsample(
745
+ final_hidden,
746
+ stride=self.stride,
747
+ target_len=shape_list(first_block_hidden)[1],
748
+ separate_cls=self.separate_cls,
749
+ truncate_seq=self.truncate_seq,
750
+ )
751
+
752
+ hidden = upsampled_hidden + first_block_hidden
753
+ all_hidden_states = (hidden,) if output_hidden_states else None
754
+ all_attentions = () if output_attentions else None
755
+
756
+ attention_inputs = self.attention_structure.init_attention_inputs(
757
+ hidden,
758
+ attention_mask=attention_mask,
759
+ token_type_ids=token_type_ids,
760
+ training=training,
761
+ )
762
+
763
+ for layer in self.layers:
764
+ layer_output = layer(
765
+ hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions, training=training
766
+ )
767
+ hidden = layer_output[0]
768
+
769
+ if output_attentions:
770
+ all_attentions = all_attentions + layer_output[1:]
771
+ if output_hidden_states:
772
+ all_hidden_states = all_hidden_states + (hidden,)
773
+
774
+ if not return_dict:
775
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
776
+ return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
777
+
778
+ def build(self, input_shape=None):
779
+ if self.built:
780
+ return
781
+ self.built = True
782
+ if getattr(self, "layers", None) is not None:
783
+ for layer in self.layers:
784
+ with tf.name_scope(layer.name):
785
+ layer.build(None)
786
+
787
+
788
+ @keras_serializable
789
+ class TFFunnelBaseLayer(keras.layers.Layer):
790
+ """Base model without decoder"""
791
+
792
+ config_class = FunnelConfig
793
+
794
+ def __init__(self, config, **kwargs):
795
+ super().__init__(**kwargs)
796
+
797
+ self.config = config
798
+ self.output_attentions = config.output_attentions
799
+ self.output_hidden_states = config.output_hidden_states
800
+ self.return_dict = config.use_return_dict
801
+
802
+ self.embeddings = TFFunnelEmbeddings(config, name="embeddings")
803
+ self.encoder = TFFunnelEncoder(config, name="encoder")
804
+
805
+ def get_input_embeddings(self):
806
+ return self.embeddings
807
+
808
+ def set_input_embeddings(self, value):
809
+ self.embeddings.weight = value
810
+ self.embeddings.vocab_size = shape_list(value)[0]
811
+
812
+ def _prune_heads(self, heads_to_prune):
813
+ raise NotImplementedError # Not implemented yet in the library for TF 2.0 models
814
+
815
+ @unpack_inputs
816
+ def call(
817
+ self,
818
+ input_ids=None,
819
+ attention_mask=None,
820
+ token_type_ids=None,
821
+ inputs_embeds=None,
822
+ output_attentions=None,
823
+ output_hidden_states=None,
824
+ return_dict=None,
825
+ training=False,
826
+ ):
827
+ if input_ids is not None and inputs_embeds is not None:
828
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
829
+ elif input_ids is not None:
830
+ input_shape = shape_list(input_ids)
831
+ elif inputs_embeds is not None:
832
+ input_shape = shape_list(inputs_embeds)[:-1]
833
+ else:
834
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
835
+
836
+ if attention_mask is None:
837
+ attention_mask = tf.fill(input_shape, 1)
838
+
839
+ if token_type_ids is None:
840
+ token_type_ids = tf.fill(input_shape, 0)
841
+
842
+ if inputs_embeds is None:
843
+ inputs_embeds = self.embeddings(input_ids, training=training)
844
+
845
+ encoder_outputs = self.encoder(
846
+ inputs_embeds,
847
+ attention_mask=attention_mask,
848
+ token_type_ids=token_type_ids,
849
+ output_attentions=output_attentions,
850
+ output_hidden_states=output_hidden_states,
851
+ return_dict=return_dict,
852
+ training=training,
853
+ )
854
+
855
+ return encoder_outputs
856
+
857
+ def build(self, input_shape=None):
858
+ if self.built:
859
+ return
860
+ self.built = True
861
+ if getattr(self, "embeddings", None) is not None:
862
+ with tf.name_scope(self.embeddings.name):
863
+ self.embeddings.build(None)
864
+ if getattr(self, "encoder", None) is not None:
865
+ with tf.name_scope(self.encoder.name):
866
+ self.encoder.build(None)
867
+
868
+
869
+ @keras_serializable
870
+ class TFFunnelMainLayer(keras.layers.Layer):
871
+ """Base model with decoder"""
872
+
873
+ config_class = FunnelConfig
874
+
875
+ def __init__(self, config, **kwargs):
876
+ super().__init__(**kwargs)
877
+
878
+ self.config = config
879
+ self.block_sizes = config.block_sizes
880
+ self.output_attentions = config.output_attentions
881
+ self.output_hidden_states = config.output_hidden_states
882
+ self.return_dict = config.use_return_dict
883
+
884
+ self.embeddings = TFFunnelEmbeddings(config, name="embeddings")
885
+ self.encoder = TFFunnelEncoder(config, name="encoder")
886
+ self.decoder = TFFunnelDecoder(config, name="decoder")
887
+
888
+ def get_input_embeddings(self):
889
+ return self.embeddings
890
+
891
+ def set_input_embeddings(self, value):
892
+ self.embeddings.weight = value
893
+ self.embeddings.vocab_size = shape_list(value)[0]
894
+
895
+ def _prune_heads(self, heads_to_prune):
896
+ raise NotImplementedError # Not implemented yet in the library for TF 2.0 models
897
+
898
+ @unpack_inputs
899
+ def call(
900
+ self,
901
+ input_ids=None,
902
+ attention_mask=None,
903
+ token_type_ids=None,
904
+ inputs_embeds=None,
905
+ output_attentions=None,
906
+ output_hidden_states=None,
907
+ return_dict=None,
908
+ training=False,
909
+ ):
910
+ if input_ids is not None and inputs_embeds is not None:
911
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
912
+ elif input_ids is not None:
913
+ input_shape = shape_list(input_ids)
914
+ elif inputs_embeds is not None:
915
+ input_shape = shape_list(inputs_embeds)[:-1]
916
+ else:
917
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
918
+
919
+ if attention_mask is None:
920
+ attention_mask = tf.fill(input_shape, 1)
921
+
922
+ if token_type_ids is None:
923
+ token_type_ids = tf.fill(input_shape, 0)
924
+
925
+ if inputs_embeds is None:
926
+ inputs_embeds = self.embeddings(input_ids, training=training)
927
+
928
+ encoder_outputs = self.encoder(
929
+ inputs_embeds,
930
+ attention_mask=attention_mask,
931
+ token_type_ids=token_type_ids,
932
+ output_attentions=output_attentions,
933
+ output_hidden_states=True,
934
+ return_dict=return_dict,
935
+ training=training,
936
+ )
937
+
938
+ decoder_outputs = self.decoder(
939
+ final_hidden=encoder_outputs[0],
940
+ first_block_hidden=encoder_outputs[1][self.block_sizes[0]],
941
+ attention_mask=attention_mask,
942
+ token_type_ids=token_type_ids,
943
+ output_attentions=output_attentions,
944
+ output_hidden_states=output_hidden_states,
945
+ return_dict=return_dict,
946
+ training=training,
947
+ )
948
+
949
+ if not return_dict:
950
+ idx = 0
951
+ outputs = (decoder_outputs[0],)
952
+ if output_hidden_states:
953
+ idx += 1
954
+ outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
955
+ if output_attentions:
956
+ idx += 1
957
+ outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
958
+ return outputs
959
+
960
+ return TFBaseModelOutput(
961
+ last_hidden_state=decoder_outputs[0],
962
+ hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)
963
+ if output_hidden_states
964
+ else None,
965
+ attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,
966
+ )
967
+
968
+ def build(self, input_shape=None):
969
+ if self.built:
970
+ return
971
+ self.built = True
972
+ if getattr(self, "embeddings", None) is not None:
973
+ with tf.name_scope(self.embeddings.name):
974
+ self.embeddings.build(None)
975
+ if getattr(self, "encoder", None) is not None:
976
+ with tf.name_scope(self.encoder.name):
977
+ self.encoder.build(None)
978
+ if getattr(self, "decoder", None) is not None:
979
+ with tf.name_scope(self.decoder.name):
980
+ self.decoder.build(None)
981
+
982
+
983
+ class TFFunnelDiscriminatorPredictions(keras.layers.Layer):
984
+ """Prediction module for the discriminator, made up of two dense layers."""
985
+
986
+ def __init__(self, config, **kwargs):
987
+ super().__init__(**kwargs)
988
+ initializer = get_initializer(config.initializer_range)
989
+ self.dense = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="dense")
990
+ self.activation_function = get_tf_activation(config.hidden_act)
991
+ self.dense_prediction = keras.layers.Dense(1, kernel_initializer=initializer, name="dense_prediction")
992
+ self.config = config
993
+
994
+ def call(self, discriminator_hidden_states):
995
+ hidden_states = self.dense(discriminator_hidden_states)
996
+ hidden_states = self.activation_function(hidden_states)
997
+ logits = tf.squeeze(self.dense_prediction(hidden_states))
998
+ return logits
999
+
1000
+ def build(self, input_shape=None):
1001
+ if self.built:
1002
+ return
1003
+ self.built = True
1004
+ if getattr(self, "dense", None) is not None:
1005
+ with tf.name_scope(self.dense.name):
1006
+ self.dense.build([None, None, self.config.d_model])
1007
+ if getattr(self, "dense_prediction", None) is not None:
1008
+ with tf.name_scope(self.dense_prediction.name):
1009
+ self.dense_prediction.build([None, None, self.config.d_model])
1010
+
1011
+
1012
+ class TFFunnelMaskedLMHead(keras.layers.Layer):
1013
+ def __init__(self, config, input_embeddings, **kwargs):
1014
+ super().__init__(**kwargs)
1015
+ self.config = config
1016
+ self.hidden_size = config.hidden_size
1017
+ self.input_embeddings = input_embeddings
1018
+
1019
+ def build(self, input_shape):
1020
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1021
+
1022
+ super().build(input_shape)
1023
+
1024
+ def get_output_embeddings(self):
1025
+ return self.input_embeddings
1026
+
1027
+ def set_output_embeddings(self, value):
1028
+ self.input_embeddings.weight = value
1029
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1030
+
1031
+ def get_bias(self):
1032
+ return {"bias": self.bias}
1033
+
1034
+ def set_bias(self, value):
1035
+ self.bias = value["bias"]
1036
+ self.config.vocab_size = shape_list(value["bias"])[0]
1037
+
1038
+ def call(self, hidden_states, training=False):
1039
+ seq_length = shape_list(tensor=hidden_states)[1]
1040
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1041
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1042
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1043
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1044
+
1045
+ return hidden_states
1046
+
1047
+
1048
+ class TFFunnelClassificationHead(keras.layers.Layer):
1049
+ def __init__(self, config, n_labels, **kwargs):
1050
+ super().__init__(**kwargs)
1051
+ initializer = get_initializer(config.initializer_range)
1052
+ self.linear_hidden = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_hidden")
1053
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1054
+ self.linear_out = keras.layers.Dense(n_labels, kernel_initializer=initializer, name="linear_out")
1055
+ self.config = config
1056
+
1057
+ def call(self, hidden, training=False):
1058
+ hidden = self.linear_hidden(hidden)
1059
+ hidden = keras.activations.tanh(hidden)
1060
+ hidden = self.dropout(hidden, training=training)
1061
+ return self.linear_out(hidden)
1062
+
1063
+ def build(self, input_shape=None):
1064
+ if self.built:
1065
+ return
1066
+ self.built = True
1067
+ if getattr(self, "linear_hidden", None) is not None:
1068
+ with tf.name_scope(self.linear_hidden.name):
1069
+ self.linear_hidden.build([None, None, self.config.d_model])
1070
+ if getattr(self, "linear_out", None) is not None:
1071
+ with tf.name_scope(self.linear_out.name):
1072
+ self.linear_out.build([None, None, self.config.d_model])
1073
+
1074
+
1075
+ class TFFunnelPreTrainedModel(TFPreTrainedModel):
1076
+ """
1077
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1078
+ models.
1079
+ """
1080
+
1081
+ config_class = FunnelConfig
1082
+ base_model_prefix = "funnel"
1083
+
1084
+ @property
1085
+ def dummy_inputs(self):
1086
+ # Funnel misbehaves with very small inputs, so we override and make them a bit bigger
1087
+ return {"input_ids": tf.ones((1, 3), dtype=tf.int32)}
1088
+
1089
+
1090
+ @dataclass
1091
+ class TFFunnelForPreTrainingOutput(ModelOutput):
1092
+ """
1093
+ Output type of [`FunnelForPreTraining`].
1094
+
1095
+ Args:
1096
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
1097
+ Prediction scores of the head (scores for each token before SoftMax).
1098
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1099
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1100
+ `(batch_size, sequence_length, hidden_size)`.
1101
+
1102
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1103
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1104
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1105
+ sequence_length)`.
1106
+
1107
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1108
+ heads.
1109
+ """
1110
+
1111
+ logits: tf.Tensor = None
1112
+ hidden_states: Tuple[tf.Tensor] | None = None
1113
+ attentions: Tuple[tf.Tensor] | None = None
1114
+
1115
+
1116
+ FUNNEL_START_DOCSTRING = r"""
1117
+
1118
+ The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
1119
+ Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1120
+
1121
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1122
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
1123
+ etc.)
1124
+
1125
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1126
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1127
+ behavior.
1128
+
1129
+ <Tip>
1130
+
1131
+ TensorFlow models and layers in `transformers` accept two formats as input:
1132
+
1133
+ - having all inputs as keyword arguments (like PyTorch models), or
1134
+ - having all inputs as a list, tuple or dict in the first positional argument.
1135
+
1136
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1137
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1138
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1139
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1140
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1141
+ positional argument:
1142
+
1143
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1144
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1145
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1146
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1147
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1148
+
1149
+ Note that when creating models and layers with
1150
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1151
+ about any of this, as you can just pass inputs like you would to any other Python function!
1152
+
1153
+ </Tip>
1154
+
1155
+ Parameters:
1156
+ config ([`XxxConfig`]): Model configuration class with all the parameters of the model.
1157
+ Initializing with a config file does not load the weights associated with the model, only the
1158
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1159
+ """
1160
+
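As a quick illustration of the three input formats described in the tip above, here is a minimal sketch using the `funnel-transformer/small` checkpoint referenced in the docstrings of this file (all three calls are equivalent):

```python
from transformers import AutoTokenizer, TFFunnelModel

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
model = TFFunnelModel.from_pretrained("funnel-transformer/small")

encoded = tokenizer("Hello, my dog is cute", return_tensors="tf")

# 1. keyword arguments, PyTorch-style
outputs = model(input_ids=encoded["input_ids"], attention_mask=encoded["attention_mask"])

# 2. a list in the first positional argument, in the order given in the docstring
outputs = model([encoded["input_ids"], encoded["attention_mask"]])

# 3. a dictionary keyed by the input names
outputs = model({"input_ids": encoded["input_ids"], "attention_mask": encoded["attention_mask"]})
```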
1161
+ FUNNEL_INPUTS_DOCSTRING = r"""
1162
+ Args:
1163
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
1164
+ Indices of input sequence tokens in the vocabulary.
1165
+
1166
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1167
+ [`PreTrainedTokenizer.encode`] for details.
1168
+
1169
+ [What are input IDs?](../glossary#input-ids)
1170
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1171
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1172
+
1173
+ - 1 for tokens that are **not masked**,
1174
+ - 0 for tokens that are **masked**.
1175
+
1176
+ [What are attention masks?](../glossary#attention-mask)
1177
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1178
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1179
+ 1]`:
1180
+
1181
+ - 0 corresponds to a *sentence A* token,
1182
+ - 1 corresponds to a *sentence B* token.
1183
+
1184
+ [What are token type IDs?](../glossary#token-type-ids)
1185
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1186
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1187
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1188
+ model's internal embedding lookup matrix.
1189
+ output_attentions (`bool`, *optional*):
1190
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1191
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1192
+ config will be used instead.
1193
+ output_hidden_states (`bool`, *optional*):
1194
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1195
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1196
+ used instead.
1197
+ return_dict (`bool`, *optional*):
1198
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1199
+ eager mode, in graph mode the value will always be set to True.
1200
+ training (`bool`, *optional*, defaults to `False`):
1201
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1202
+ behaviors between training and evaluation).
1203
+ """
1204
+
1205
+
1206
+ @add_start_docstrings(
1207
+ """
1208
+ The base Funnel Transformer model outputting raw hidden-states without the upsampling head (also called
1209
+ decoder) or any task-specific head on top.
1210
+ """,
1211
+ FUNNEL_START_DOCSTRING,
1212
+ )
1213
+ class TFFunnelBaseModel(TFFunnelPreTrainedModel):
1214
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1215
+ super().__init__(config, *inputs, **kwargs)
1216
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
1217
+
1218
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1219
+ @add_code_sample_docstrings(
1220
+ checkpoint="funnel-transformer/small-base",
1221
+ output_type=TFBaseModelOutput,
1222
+ config_class=_CONFIG_FOR_DOC,
1223
+ )
1224
+ @unpack_inputs
1225
+ def call(
1226
+ self,
1227
+ input_ids: TFModelInputType | None = None,
1228
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1229
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1230
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1231
+ output_attentions: Optional[bool] = None,
1232
+ output_hidden_states: Optional[bool] = None,
1233
+ return_dict: Optional[bool] = None,
1234
+ training: bool = False,
1235
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]:
1236
+ return self.funnel(
1237
+ input_ids=input_ids,
1238
+ attention_mask=attention_mask,
1239
+ token_type_ids=token_type_ids,
1240
+ inputs_embeds=inputs_embeds,
1241
+ output_attentions=output_attentions,
1242
+ output_hidden_states=output_hidden_states,
1243
+ return_dict=return_dict,
1244
+ training=training,
1245
+ )
1246
+
1247
+ def serving_output(self, output):
1248
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1249
+ # different dimensions
1250
+ return TFBaseModelOutput(
1251
+ last_hidden_state=output.last_hidden_state,
1252
+ hidden_states=output.hidden_states,
1253
+ attentions=output.attentions,
1254
+ )
1255
+
1256
+ def build(self, input_shape=None):
1257
+ if self.built:
1258
+ return
1259
+ self.built = True
1260
+ if getattr(self, "funnel", None) is not None:
1261
+ with tf.name_scope(self.funnel.name):
1262
+ self.funnel.build(None)
1263
+
1264
+
1265
+ @add_start_docstrings(
1266
+ "The bare Funnel Transformer model outputting raw hidden-states without any specific head on top.",
1267
+ FUNNEL_START_DOCSTRING,
1268
+ )
1269
+ class TFFunnelModel(TFFunnelPreTrainedModel):
1270
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1271
+ super().__init__(config, *inputs, **kwargs)
1272
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1273
+
1274
+ @unpack_inputs
1275
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1276
+ @add_code_sample_docstrings(
1277
+ checkpoint="funnel-transformer/small",
1278
+ output_type=TFBaseModelOutput,
1279
+ config_class=_CONFIG_FOR_DOC,
1280
+ )
1281
+ def call(
1282
+ self,
1283
+ input_ids: TFModelInputType | None = None,
1284
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1285
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1286
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1287
+ output_attentions: Optional[bool] = None,
1288
+ output_hidden_states: Optional[bool] = None,
1289
+ return_dict: Optional[bool] = None,
1290
+ training: bool = False,
1291
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]:
1292
+ return self.funnel(
1293
+ input_ids=input_ids,
1294
+ attention_mask=attention_mask,
1295
+ token_type_ids=token_type_ids,
1296
+ inputs_embeds=inputs_embeds,
1297
+ output_attentions=output_attentions,
1298
+ output_hidden_states=output_hidden_states,
1299
+ return_dict=return_dict,
1300
+ training=training,
1301
+ )
1302
+
1303
+ def serving_output(self, output):
1304
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1305
+ # different dimensions
1306
+ return TFBaseModelOutput(
1307
+ last_hidden_state=output.last_hidden_state,
1308
+ hidden_states=output.hidden_states,
1309
+ attentions=output.attentions,
1310
+ )
1311
+
1312
+ def build(self, input_shape=None):
1313
+ if self.built:
1314
+ return
1315
+ self.built = True
1316
+ if getattr(self, "funnel", None) is not None:
1317
+ with tf.name_scope(self.funnel.name):
1318
+ self.funnel.build(None)
1319
+
1320
+
1321
+ @add_start_docstrings(
1322
+ """
1323
+ Funnel model with a binary classification head on top as used during pretraining for identifying generated tokens.
1324
+ """,
1325
+ FUNNEL_START_DOCSTRING,
1326
+ )
1327
+ class TFFunnelForPreTraining(TFFunnelPreTrainedModel):
1328
+ def __init__(self, config: FunnelConfig, **kwargs) -> None:
1329
+ super().__init__(config, **kwargs)
1330
+
1331
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1332
+ self.discriminator_predictions = TFFunnelDiscriminatorPredictions(config, name="discriminator_predictions")
1333
+
1334
+ @unpack_inputs
1335
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1336
+ @replace_return_docstrings(output_type=TFFunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1337
+ def call(
1338
+ self,
1339
+ input_ids: TFModelInputType | None = None,
1340
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1341
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1342
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1343
+ output_attentions: Optional[bool] = None,
1344
+ output_hidden_states: Optional[bool] = None,
1345
+ return_dict: Optional[bool] = None,
1346
+ training: bool = False,
1347
+ **kwargs,
1348
+ ) -> Union[Tuple[tf.Tensor], TFFunnelForPreTrainingOutput]:
1349
+ r"""
1350
+ Returns:
1351
+
1352
+ Examples:
1353
+
1354
+ ```python
1355
+ >>> from transformers import AutoTokenizer, TFFunnelForPreTraining
1356
+ >>> import tensorflow as tf
1357
+
1358
+ >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
1359
+ >>> model = TFFunnelForPreTraining.from_pretrained("funnel-transformer/small")
1360
+
1361
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
1362
+ >>> logits = model(inputs).logits
1363
+ ```"""
1364
+ discriminator_hidden_states = self.funnel(
1365
+ input_ids,
1366
+ attention_mask,
1367
+ token_type_ids,
1368
+ inputs_embeds,
1369
+ output_attentions,
1370
+ output_hidden_states,
1371
+ return_dict=return_dict,
1372
+ training=training,
1373
+ )
1374
+ discriminator_sequence_output = discriminator_hidden_states[0]
1375
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1376
+
1377
+ if not return_dict:
1378
+ return (logits,) + discriminator_hidden_states[1:]
1379
+
1380
+ return TFFunnelForPreTrainingOutput(
1381
+ logits=logits,
1382
+ hidden_states=discriminator_hidden_states.hidden_states,
1383
+ attentions=discriminator_hidden_states.attentions,
1384
+ )
1385
+
1386
+ def serving_output(self, output):
1387
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1388
+ # different dimensions
1389
+ return TFFunnelForPreTrainingOutput(
1390
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1391
+ )
1392
+
1393
+ def build(self, input_shape=None):
1394
+ if self.built:
1395
+ return
1396
+ self.built = True
1397
+ if getattr(self, "funnel", None) is not None:
1398
+ with tf.name_scope(self.funnel.name):
1399
+ self.funnel.build(None)
1400
+ if getattr(self, "discriminator_predictions", None) is not None:
1401
+ with tf.name_scope(self.discriminator_predictions.name):
1402
+ self.discriminator_predictions.build(None)
1403
+
1404
+
1405
+ @add_start_docstrings("""Funnel Model with a `language modeling` head on top.""", FUNNEL_START_DOCSTRING)
1406
+ class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss):
1407
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1408
+ super().__init__(config, *inputs, **kwargs)
1409
+
1410
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1411
+ self.lm_head = TFFunnelMaskedLMHead(config, self.funnel.embeddings, name="lm_head")
1412
+
1413
+ def get_lm_head(self) -> TFFunnelMaskedLMHead:
1414
+ return self.lm_head
1415
+
1416
+ def get_prefix_bias_name(self) -> str:
1417
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1418
+ return self.name + "/" + self.lm_head.name
1419
+
1420
+ @unpack_inputs
1421
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1422
+ @add_code_sample_docstrings(
1423
+ checkpoint="funnel-transformer/small",
1424
+ output_type=TFMaskedLMOutput,
1425
+ config_class=_CONFIG_FOR_DOC,
1426
+ )
1427
+ def call(
1428
+ self,
1429
+ input_ids: TFModelInputType | None = None,
1430
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1431
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1432
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1433
+ output_attentions: Optional[bool] = None,
1434
+ output_hidden_states: Optional[bool] = None,
1435
+ return_dict: Optional[bool] = None,
1436
+ labels: np.ndarray | tf.Tensor | None = None,
1437
+ training: bool = False,
1438
+ ) -> Union[Tuple[tf.Tensor], TFMaskedLMOutput]:
1439
+ r"""
1440
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1441
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1442
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1443
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1444
+ """
1445
+ outputs = self.funnel(
1446
+ input_ids,
1447
+ attention_mask,
1448
+ token_type_ids,
1449
+ inputs_embeds,
1450
+ output_attentions,
1451
+ output_hidden_states,
1452
+ return_dict=return_dict,
1453
+ training=training,
1454
+ )
1455
+ sequence_output = outputs[0]
1456
+ prediction_scores = self.lm_head(sequence_output, training=training)
1457
+
1458
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1459
+
1460
+ if not return_dict:
1461
+ output = (prediction_scores,) + outputs[1:]
1462
+ return ((loss,) + output) if loss is not None else output
1463
+
1464
+ return TFMaskedLMOutput(
1465
+ loss=loss,
1466
+ logits=prediction_scores,
1467
+ hidden_states=outputs.hidden_states,
1468
+ attentions=outputs.attentions,
1469
+ )
1470
+
1471
+ def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
1472
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1473
+ # different dimensions
1474
+ return TFMaskedLMOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions)
1475
+
1476
+ def build(self, input_shape=None):
1477
+ if self.built:
1478
+ return
1479
+ self.built = True
1480
+ if getattr(self, "funnel", None) is not None:
1481
+ with tf.name_scope(self.funnel.name):
1482
+ self.funnel.build(None)
1483
+ if getattr(self, "lm_head", None) is not None:
1484
+ with tf.name_scope(self.lm_head.name):
1485
+ self.lm_head.build(None)
1486
+
1487
+
1488
+ @add_start_docstrings(
1489
+ """
1490
+ Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1491
+ output) e.g. for GLUE tasks.
1492
+ """,
1493
+ FUNNEL_START_DOCSTRING,
1494
+ )
1495
+ class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClassificationLoss):
1496
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1497
+ super().__init__(config, *inputs, **kwargs)
1498
+ self.num_labels = config.num_labels
1499
+
1500
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
1501
+ self.classifier = TFFunnelClassificationHead(config, config.num_labels, name="classifier")
1502
+
1503
+ @unpack_inputs
1504
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1505
+ @add_code_sample_docstrings(
1506
+ checkpoint="funnel-transformer/small-base",
1507
+ output_type=TFSequenceClassifierOutput,
1508
+ config_class=_CONFIG_FOR_DOC,
1509
+ )
1510
+ def call(
1511
+ self,
1512
+ input_ids: TFModelInputType | None = None,
1513
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1514
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1515
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1516
+ output_attentions: Optional[bool] = None,
1517
+ output_hidden_states: Optional[bool] = None,
1518
+ return_dict: Optional[bool] = None,
1519
+ labels: np.ndarray | tf.Tensor | None = None,
1520
+ training: bool = False,
1521
+ ) -> Union[Tuple[tf.Tensor], TFSequenceClassifierOutput]:
1522
+ r"""
1523
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1524
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1525
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1526
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1527
+ """
1528
+ outputs = self.funnel(
1529
+ input_ids,
1530
+ attention_mask,
1531
+ token_type_ids,
1532
+ inputs_embeds,
1533
+ output_attentions,
1534
+ output_hidden_states,
1535
+ return_dict=return_dict,
1536
+ training=training,
1537
+ )
1538
+ last_hidden_state = outputs[0]
1539
+ pooled_output = last_hidden_state[:, 0]
1540
+ logits = self.classifier(pooled_output, training=training)
1541
+
1542
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1543
+
1544
+ if not return_dict:
1545
+ output = (logits,) + outputs[1:]
1546
+ return ((loss,) + output) if loss is not None else output
1547
+
1548
+ return TFSequenceClassifierOutput(
1549
+ loss=loss,
1550
+ logits=logits,
1551
+ hidden_states=outputs.hidden_states,
1552
+ attentions=outputs.attentions,
1553
+ )
1554
+
1555
+ def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
1556
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1557
+ # different dimensions
1558
+ return TFSequenceClassifierOutput(
1559
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1560
+ )
1561
+
1562
+ def build(self, input_shape=None):
1563
+ if self.built:
1564
+ return
1565
+ self.built = True
1566
+ if getattr(self, "funnel", None) is not None:
1567
+ with tf.name_scope(self.funnel.name):
1568
+ self.funnel.build(None)
1569
+ if getattr(self, "classifier", None) is not None:
1570
+ with tf.name_scope(self.classifier.name):
1571
+ self.classifier.build(None)
1572
+
1573
+
1574
+ @add_start_docstrings(
1575
+ """
1576
+ Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1577
+ softmax) e.g. for RocStories/SWAG tasks.
1578
+ """,
1579
+ FUNNEL_START_DOCSTRING,
1580
+ )
1581
+ class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss):
1582
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1583
+ super().__init__(config, *inputs, **kwargs)
1584
+
1585
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
1586
+ self.classifier = TFFunnelClassificationHead(config, 1, name="classifier")
1587
+
1588
+ @property
1589
+ def dummy_inputs(self):
1590
+ return {"input_ids": tf.ones((3, 3, 4), dtype=tf.int32)}
1591
+
1592
+ @unpack_inputs
1593
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1594
+ @add_code_sample_docstrings(
1595
+ checkpoint="funnel-transformer/small-base",
1596
+ output_type=TFMultipleChoiceModelOutput,
1597
+ config_class=_CONFIG_FOR_DOC,
1598
+ )
1599
+ def call(
1600
+ self,
1601
+ input_ids: TFModelInputType | None = None,
1602
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1603
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1604
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1605
+ output_attentions: Optional[bool] = None,
1606
+ output_hidden_states: Optional[bool] = None,
1607
+ return_dict: Optional[bool] = None,
1608
+ labels: np.ndarray | tf.Tensor | None = None,
1609
+ training: bool = False,
1610
+ ) -> Union[Tuple[tf.Tensor], TFMultipleChoiceModelOutput]:
1611
+ r"""
1612
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1613
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1614
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1615
+ """
1616
+ if input_ids is not None:
1617
+ num_choices = shape_list(input_ids)[1]
1618
+ seq_length = shape_list(input_ids)[2]
1619
+ else:
1620
+ num_choices = shape_list(inputs_embeds)[1]
1621
+ seq_length = shape_list(inputs_embeds)[2]
1622
+
1623
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1624
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1625
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1626
+ flat_inputs_embeds = (
1627
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1628
+ if inputs_embeds is not None
1629
+ else None
1630
+ )
1631
+
1632
+ outputs = self.funnel(
1633
+ flat_input_ids,
1634
+ attention_mask=flat_attention_mask,
1635
+ token_type_ids=flat_token_type_ids,
1636
+ inputs_embeds=flat_inputs_embeds,
1637
+ output_attentions=output_attentions,
1638
+ output_hidden_states=output_hidden_states,
1639
+ return_dict=return_dict,
1640
+ training=training,
1641
+ )
1642
+
1643
+ last_hidden_state = outputs[0]
1644
+ pooled_output = last_hidden_state[:, 0]
1645
+ logits = self.classifier(pooled_output, training=training)
1646
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1647
+
1648
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1649
+
1650
+ if not return_dict:
1651
+ output = (reshaped_logits,) + outputs[1:]
1652
+ return ((loss,) + output) if loss is not None else output
1653
+
1654
+ return TFMultipleChoiceModelOutput(
1655
+ loss=loss,
1656
+ logits=reshaped_logits,
1657
+ hidden_states=outputs.hidden_states,
1658
+ attentions=outputs.attentions,
1659
+ )
1660
+
1661
+ def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
1662
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1663
+ # different dimensions
1664
+ return TFMultipleChoiceModelOutput(
1665
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1666
+ )
1667
+
1668
+ def build(self, input_shape=None):
1669
+ if self.built:
1670
+ return
1671
+ self.built = True
1672
+ if getattr(self, "funnel", None) is not None:
1673
+ with tf.name_scope(self.funnel.name):
1674
+ self.funnel.build(None)
1675
+ if getattr(self, "classifier", None) is not None:
1676
+ with tf.name_scope(self.classifier.name):
1677
+ self.classifier.build(None)
1678
+
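The multiple-choice head above works by folding the choice dimension into the batch dimension before running the encoder, then unfolding the logits afterwards. A shape-only sketch of that reshaping, with toy sizes and random tensors standing in for the real encoder and classifier outputs:

```python
import tensorflow as tf

batch_size, num_choices, seq_length = 2, 4, 8
input_ids = tf.ones((batch_size, num_choices, seq_length), dtype=tf.int32)

# fold the choices into the batch dimension before the encoder call
flat_input_ids = tf.reshape(input_ids, (-1, seq_length))        # shape (8, 8)

# the classification head returns one score per flattened example
logits = tf.random.normal((batch_size * num_choices, 1))

# unfold so that each row holds the scores for one question's choices
reshaped_logits = tf.reshape(logits, (-1, num_choices))         # shape (2, 4)
```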
1679
+
1680
+ @add_start_docstrings(
1681
+ """
1682
+ Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1683
+ Named-Entity-Recognition (NER) tasks.
1684
+ """,
1685
+ FUNNEL_START_DOCSTRING,
1686
+ )
1687
+ class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificationLoss):
1688
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1689
+ super().__init__(config, *inputs, **kwargs)
1690
+ self.num_labels = config.num_labels
1691
+
1692
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1693
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1694
+ self.classifier = keras.layers.Dense(
1695
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1696
+ )
1697
+ self.config = config
1698
+
1699
+ @unpack_inputs
1700
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1701
+ @add_code_sample_docstrings(
1702
+ checkpoint="funnel-transformer/small",
1703
+ output_type=TFTokenClassifierOutput,
1704
+ config_class=_CONFIG_FOR_DOC,
1705
+ )
1706
+ def call(
1707
+ self,
1708
+ input_ids: TFModelInputType | None = None,
1709
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1710
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1711
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1712
+ output_attentions: Optional[bool] = None,
1713
+ output_hidden_states: Optional[bool] = None,
1714
+ return_dict: Optional[bool] = None,
1715
+ labels: np.ndarray | tf.Tensor | None = None,
1716
+ training: bool = False,
1717
+ ) -> Union[Tuple[tf.Tensor], TFTokenClassifierOutput]:
1718
+ r"""
1719
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1720
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1721
+ """
1722
+ outputs = self.funnel(
1723
+ input_ids,
1724
+ attention_mask,
1725
+ token_type_ids,
1726
+ inputs_embeds,
1727
+ output_attentions,
1728
+ output_hidden_states,
1729
+ return_dict=return_dict,
1730
+ training=training,
1731
+ )
1732
+ sequence_output = outputs[0]
1733
+
1734
+ sequence_output = self.dropout(sequence_output, training=training)
1735
+ logits = self.classifier(sequence_output)
1736
+
1737
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1738
+
1739
+ if not return_dict:
1740
+ output = (logits,) + outputs[1:]
1741
+ return ((loss,) + output) if loss is not None else output
1742
+
1743
+ return TFTokenClassifierOutput(
1744
+ loss=loss,
1745
+ logits=logits,
1746
+ hidden_states=outputs.hidden_states,
1747
+ attentions=outputs.attentions,
1748
+ )
1749
+
1750
+ def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
1751
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1752
+ # different dimensions
1753
+ return TFTokenClassifierOutput(
1754
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1755
+ )
1756
+
1757
+ def build(self, input_shape=None):
1758
+ if self.built:
1759
+ return
1760
+ self.built = True
1761
+ if getattr(self, "funnel", None) is not None:
1762
+ with tf.name_scope(self.funnel.name):
1763
+ self.funnel.build(None)
1764
+ if getattr(self, "classifier", None) is not None:
1765
+ with tf.name_scope(self.classifier.name):
1766
+ self.classifier.build([None, None, self.config.hidden_size])
1767
+
1768
+
1769
+ @add_start_docstrings(
1770
+ """
1771
+ Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1772
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1773
+ """,
1774
+ FUNNEL_START_DOCSTRING,
1775
+ )
1776
+ class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringLoss):
1777
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1778
+ super().__init__(config, *inputs, **kwargs)
1779
+ self.num_labels = config.num_labels
1780
+
1781
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1782
+ self.qa_outputs = keras.layers.Dense(
1783
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1784
+ )
1785
+ self.config = config
1786
+
1787
+ @unpack_inputs
1788
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1789
+ @add_code_sample_docstrings(
1790
+ checkpoint="funnel-transformer/small",
1791
+ output_type=TFQuestionAnsweringModelOutput,
1792
+ config_class=_CONFIG_FOR_DOC,
1793
+ )
1794
+ def call(
1795
+ self,
1796
+ input_ids: TFModelInputType | None = None,
1797
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1798
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1799
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1800
+ output_attentions: Optional[bool] = None,
1801
+ output_hidden_states: Optional[bool] = None,
1802
+ return_dict: Optional[bool] = None,
1803
+ start_positions: np.ndarray | tf.Tensor | None = None,
1804
+ end_positions: np.ndarray | tf.Tensor | None = None,
1805
+ training: bool = False,
1806
+ ) -> Union[Tuple[tf.Tensor], TFQuestionAnsweringModelOutput]:
1807
+ r"""
1808
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1809
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1810
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1811
+ are not taken into account for computing the loss.
1812
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1813
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1814
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1815
+ are not taken into account for computing the loss.
1816
+ """
1817
+
1818
+ outputs = self.funnel(
1819
+ input_ids,
1820
+ attention_mask,
1821
+ token_type_ids,
1822
+ inputs_embeds,
1823
+ output_attentions,
1824
+ output_hidden_states,
1825
+ return_dict=return_dict,
1826
+ training=training,
1827
+ )
1828
+ sequence_output = outputs[0]
1829
+
1830
+ logits = self.qa_outputs(sequence_output)
1831
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1832
+ start_logits = tf.squeeze(start_logits, axis=-1)
1833
+ end_logits = tf.squeeze(end_logits, axis=-1)
1834
+
1835
+ loss = None
1836
+ if start_positions is not None and end_positions is not None:
1837
+ labels = {"start_position": start_positions, "end_position": end_positions}
1838
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1839
+
1840
+ if not return_dict:
1841
+ output = (start_logits, end_logits) + outputs[1:]
1842
+ return ((loss,) + output) if loss is not None else output
1843
+
1844
+ return TFQuestionAnsweringModelOutput(
1845
+ loss=loss,
1846
+ start_logits=start_logits,
1847
+ end_logits=end_logits,
1848
+ hidden_states=outputs.hidden_states,
1849
+ attentions=outputs.attentions,
1850
+ )
1851
+
1852
+ def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
1853
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1854
+ # different dimensions
1855
+ return TFQuestionAnsweringModelOutput(
1856
+ start_logits=output.start_logits,
1857
+ end_logits=output.end_logits,
1858
+ hidden_states=output.hidden_states,
1859
+ attentions=output.attentions,
1860
+ )
1861
+
1862
+ def build(self, input_shape=None):
1863
+ if self.built:
1864
+ return
1865
+ self.built = True
1866
+ if getattr(self, "funnel", None) is not None:
1867
+ with tf.name_scope(self.funnel.name):
1868
+ self.funnel.build(None)
1869
+ if getattr(self, "qa_outputs", None) is not None:
1870
+ with tf.name_scope(self.qa_outputs.name):
1871
+ self.qa_outputs.build([None, None, self.config.hidden_size])
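In the question-answering head above, `qa_outputs` is a 2-unit dense layer whose output is split into start and end logits. A small standalone sketch of that split, with random tensors and toy sizes in place of the real encoder output:

```python
import tensorflow as tf

batch_size, seq_length, hidden_size = 2, 16, 32
sequence_output = tf.random.normal((batch_size, seq_length, hidden_size))

qa_outputs = tf.keras.layers.Dense(2)              # unit 0 -> start, unit 1 -> end
logits = qa_outputs(sequence_output)               # shape (2, 16, 2)

start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)   # shape (2, 16)
end_logits = tf.squeeze(end_logits, axis=-1)       # shape (2, 16)
```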
venv/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel.py ADDED
@@ -0,0 +1,534 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for Funnel Transformer."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+ _model_names = [
31
+ "small",
32
+ "small-base",
33
+ "medium",
34
+ "medium-base",
35
+ "intermediate",
36
+ "intermediate-base",
37
+ "large",
38
+ "large-base",
39
+ "xlarge",
40
+ "xlarge-base",
41
+ ]
42
+
43
+
44
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
45
+ def load_vocab(vocab_file):
46
+ """Loads a vocabulary file into a dictionary."""
47
+ vocab = collections.OrderedDict()
48
+ with open(vocab_file, "r", encoding="utf-8") as reader:
49
+ tokens = reader.readlines()
50
+ for index, token in enumerate(tokens):
51
+ token = token.rstrip("\n")
52
+ vocab[token] = index
53
+ return vocab
54
+
55
+
56
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
57
+ def whitespace_tokenize(text):
58
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
59
+ text = text.strip()
60
+ if not text:
61
+ return []
62
+ tokens = text.split()
63
+ return tokens
64
+
65
+
66
+ class FunnelTokenizer(PreTrainedTokenizer):
67
+ r"""
68
+ Construct a Funnel Transformer tokenizer. Based on WordPiece.
69
+
70
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
71
+ this superclass for more information regarding those methods.
72
+
73
+ Args:
74
+ vocab_file (`str`):
75
+ File containing the vocabulary.
76
+ do_lower_case (`bool`, *optional*, defaults to `True`):
77
+ Whether or not to lowercase the input when tokenizing.
78
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
79
+ Whether or not to do basic tokenization before WordPiece.
80
+ never_split (`Iterable`, *optional*):
81
+ Collection of tokens which will never be split during tokenization. Only has an effect when
82
+ `do_basic_tokenize=True`
83
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
84
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
85
+ token instead.
86
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
87
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
88
+ sequence classification or for a text and a question for question answering. It is also used as the last
89
+ token of a sequence built with special tokens.
90
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
91
+ The token used for padding, for example when batching sequences of different lengths.
92
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
93
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
94
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
95
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
96
+ The token used for masking values. This is the token used when training this model with masked language
97
+ modeling. This is the token which the model will try to predict.
98
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
99
+ The beginning of sentence token.
100
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
101
+ The end of sentence token.
102
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
103
+ Whether or not to tokenize Chinese characters.
104
+
105
+ This should likely be deactivated for Japanese (see this
106
+ [issue](https://github.com/huggingface/transformers/issues/328)).
107
+ strip_accents (`bool`, *optional*):
108
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
109
+ value for `lowercase` (as in the original BERT).
110
+ """
111
+
112
+ vocab_files_names = VOCAB_FILES_NAMES
113
+ cls_token_type_id: int = 2
114
+
115
+ def __init__(
116
+ self,
117
+ vocab_file,
118
+ do_lower_case=True,
119
+ do_basic_tokenize=True,
120
+ never_split=None,
121
+ unk_token="<unk>",
122
+ sep_token="<sep>",
123
+ pad_token="<pad>",
124
+ cls_token="<cls>",
125
+ mask_token="<mask>",
126
+ bos_token="<s>",
127
+ eos_token="</s>",
128
+ tokenize_chinese_chars=True,
129
+ strip_accents=None,
130
+ **kwargs,
131
+ ):
132
+ if not os.path.isfile(vocab_file):
133
+ raise ValueError(
134
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
135
+ " model use `tokenizer = FunnelTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
136
+ )
137
+ self.vocab = load_vocab(vocab_file)
138
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
139
+ self.do_basic_tokenize = do_basic_tokenize
140
+ if do_basic_tokenize:
141
+ self.basic_tokenizer = BasicTokenizer(
142
+ do_lower_case=do_lower_case,
143
+ never_split=never_split,
144
+ tokenize_chinese_chars=tokenize_chinese_chars,
145
+ strip_accents=strip_accents,
146
+ )
147
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
148
+
149
+ super().__init__(
150
+ do_lower_case=do_lower_case,
151
+ do_basic_tokenize=do_basic_tokenize,
152
+ never_split=never_split,
153
+ unk_token=unk_token,
154
+ sep_token=sep_token,
155
+ pad_token=pad_token,
156
+ cls_token=cls_token,
157
+ mask_token=mask_token,
158
+ bos_token=bos_token,
159
+ eos_token=eos_token,
160
+ tokenize_chinese_chars=tokenize_chinese_chars,
161
+ strip_accents=strip_accents,
162
+ **kwargs,
163
+ )
164
+
165
+ @property
166
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
167
+ def do_lower_case(self):
168
+ return self.basic_tokenizer.do_lower_case
169
+
170
+ @property
171
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
172
+ def vocab_size(self):
173
+ return len(self.vocab)
174
+
175
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
176
+ def get_vocab(self):
177
+ return dict(self.vocab, **self.added_tokens_encoder)
178
+
179
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
180
+ def _tokenize(self, text, split_special_tokens=False):
181
+ split_tokens = []
182
+ if self.do_basic_tokenize:
183
+ for token in self.basic_tokenizer.tokenize(
184
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
185
+ ):
186
+ # If the token is part of the never_split set
187
+ if token in self.basic_tokenizer.never_split:
188
+ split_tokens.append(token)
189
+ else:
190
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
191
+ else:
192
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
193
+ return split_tokens
194
+
195
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
196
+ def _convert_token_to_id(self, token):
197
+ """Converts a token (str) in an id using the vocab."""
198
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
199
+
200
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
201
+ def _convert_id_to_token(self, index):
202
+ """Converts an index (integer) in a token (str) using the vocab."""
203
+ return self.ids_to_tokens.get(index, self.unk_token)
204
+
205
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
206
+ def convert_tokens_to_string(self, tokens):
207
+ """Converts a sequence of tokens (string) in a single string."""
208
+ out_string = " ".join(tokens).replace(" ##", "").strip()
209
+ return out_string
210
+
211
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
212
+ def build_inputs_with_special_tokens(
213
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
214
+ ) -> List[int]:
215
+ """
216
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
217
+ adding special tokens. A BERT sequence has the following format:
218
+
219
+ - single sequence: `[CLS] X [SEP]`
220
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs to which the special tokens will be added.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
230
+ """
231
+ if token_ids_1 is None:
232
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
233
+ cls = [self.cls_token_id]
234
+ sep = [self.sep_token_id]
235
+ return cls + token_ids_0 + sep + token_ids_1 + sep
236
+
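To make the `<cls> A <sep>` / `<cls> A <sep> B <sep>` layout above concrete, here is a plain-Python sketch that mirrors the return statements; the token IDs are made up purely for illustration:

```python
cls_id, sep_id = 3, 4            # hypothetical <cls> / <sep> ids
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

single = [cls_id] + token_ids_0 + [sep_id]
# -> [3, 10, 11, 12, 4]                      i.e. <cls> A <sep>

pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
# -> [3, 10, 11, 12, 4, 20, 21, 4]           i.e. <cls> A <sep> B <sep>
```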
237
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
238
+ def get_special_tokens_mask(
239
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
240
+ ) -> List[int]:
241
+ """
242
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
243
+ special tokens using the tokenizer `prepare_for_model` method.
244
+
245
+ Args:
246
+ token_ids_0 (`List[int]`):
247
+ List of IDs.
248
+ token_ids_1 (`List[int]`, *optional*):
249
+ Optional second list of IDs for sequence pairs.
250
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
251
+ Whether or not the token list is already formatted with special tokens for the model.
252
+
253
+ Returns:
254
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
255
+ """
256
+
257
+ if already_has_special_tokens:
258
+ return super().get_special_tokens_mask(
259
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
260
+ )
261
+
262
+ if token_ids_1 is not None:
263
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
264
+ return [1] + ([0] * len(token_ids_0)) + [1]
265
+
266
+ def create_token_type_ids_from_sequences(
267
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
268
+ ) -> List[int]:
269
+ """
270
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
271
+ Transformer sequence pair mask has the following format:
272
+
273
+ ```
274
+ 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
275
+ | first sequence | second sequence |
276
+ ```
277
+
278
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
279
+
280
+ Args:
281
+ token_ids_0 (`List[int]`):
282
+ List of IDs.
283
+ token_ids_1 (`List[int]`, *optional*):
284
+ Optional second list of IDs for sequence pairs.
285
+
286
+ Returns:
287
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
288
+ """
289
+ sep = [self.sep_token_id]
290
+ cls = [self.cls_token_id]
291
+ if token_ids_1 is None:
292
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
293
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
294
+
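A plain-Python sketch of the segment mask produced above, again with made-up IDs; note the dedicated `cls_token_type_id` of 2 that Funnel assigns to the leading `<cls>` token, unlike BERT:

```python
cls, sep = [3], [4]              # hypothetical <cls> / <sep> ids
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]
cls_token_type_id = 2            # Funnel gives <cls> its own segment id

pair_type_ids = (
    len(cls) * [cls_token_type_id]
    + len(token_ids_0 + sep) * [0]
    + len(token_ids_1 + sep) * [1]
)
# -> [2, 0, 0, 0, 0, 1, 1, 1]

single_type_ids = len(cls) * [cls_token_type_id] + len(token_ids_0 + sep) * [0]
# -> [2, 0, 0, 0, 0]
```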
295
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
296
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
297
+ index = 0
298
+ if os.path.isdir(save_directory):
299
+ vocab_file = os.path.join(
300
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
301
+ )
302
+ else:
303
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
304
+ with open(vocab_file, "w", encoding="utf-8") as writer:
305
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
306
+ if index != token_index:
307
+ logger.warning(
308
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
309
+ " Please check that the vocabulary is not corrupted!"
310
+ )
311
+ index = token_index
312
+ writer.write(token + "\n")
313
+ index += 1
314
+ return (vocab_file,)
315
+
316
+
317
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
318
+ class BasicTokenizer(object):
319
+ """
320
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
321
+
322
+ Args:
323
+ do_lower_case (`bool`, *optional*, defaults to `True`):
324
+ Whether or not to lowercase the input when tokenizing.
325
+ never_split (`Iterable`, *optional*):
326
+ Collection of tokens which will never be split during tokenization. Only has an effect when
327
+ `do_basic_tokenize=True`
328
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
329
+ Whether or not to tokenize Chinese characters.
330
+
331
+ This should likely be deactivated for Japanese (see this
332
+ [issue](https://github.com/huggingface/transformers/issues/328)).
333
+ strip_accents (`bool`, *optional*):
334
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
335
+ value for `lowercase` (as in the original BERT).
336
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
337
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
338
+ the full context of the words, such as contractions.
339
+ """
340
+
341
+ def __init__(
342
+ self,
343
+ do_lower_case=True,
344
+ never_split=None,
345
+ tokenize_chinese_chars=True,
346
+ strip_accents=None,
347
+ do_split_on_punc=True,
348
+ ):
349
+ if never_split is None:
350
+ never_split = []
351
+ self.do_lower_case = do_lower_case
352
+ self.never_split = set(never_split)
353
+ self.tokenize_chinese_chars = tokenize_chinese_chars
354
+ self.strip_accents = strip_accents
355
+ self.do_split_on_punc = do_split_on_punc
356
+
357
+ def tokenize(self, text, never_split=None):
358
+ """
359
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
360
+
361
+ Args:
362
+ never_split (`List[str]`, *optional*)
363
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
364
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
365
+ """
366
+ # union() returns a new set by concatenating the two sets.
367
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
368
+ text = self._clean_text(text)
369
+
370
+ # This was added on November 1st, 2018 for the multilingual and Chinese
371
+ # models. This is also applied to the English models now, but it doesn't
372
+ # matter since the English models were not trained on any Chinese data
373
+ # and generally don't have any Chinese data in them (there are Chinese
374
+ # characters in the vocabulary because Wikipedia does have some Chinese
375
+ # words in the English Wikipedia.).
376
+ if self.tokenize_chinese_chars:
377
+ text = self._tokenize_chinese_chars(text)
378
+ # prevents treating the same character with different unicode codepoints as different characters
379
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
380
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
381
+ split_tokens = []
382
+ for token in orig_tokens:
383
+ if token not in never_split:
384
+ if self.do_lower_case:
385
+ token = token.lower()
386
+ if self.strip_accents is not False:
387
+ token = self._run_strip_accents(token)
388
+ elif self.strip_accents:
389
+ token = self._run_strip_accents(token)
390
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
391
+
392
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
393
+ return output_tokens
394
+
395
+ def _run_strip_accents(self, text):
396
+ """Strips accents from a piece of text."""
397
+ text = unicodedata.normalize("NFD", text)
398
+ output = []
399
+ for char in text:
400
+ cat = unicodedata.category(char)
401
+ if cat == "Mn":
402
+ continue
403
+ output.append(char)
404
+ return "".join(output)
405
+
406
+ def _run_split_on_punc(self, text, never_split=None):
407
+ """Splits punctuation on a piece of text."""
408
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
409
+ return [text]
410
+ chars = list(text)
411
+ i = 0
412
+ start_new_word = True
413
+ output = []
414
+ while i < len(chars):
415
+ char = chars[i]
416
+ if _is_punctuation(char):
417
+ output.append([char])
418
+ start_new_word = True
419
+ else:
420
+ if start_new_word:
421
+ output.append([])
422
+ start_new_word = False
423
+ output[-1].append(char)
424
+ i += 1
425
+
426
+ return ["".join(x) for x in output]
427
+
428
+ def _tokenize_chinese_chars(self, text):
429
+ """Adds whitespace around any CJK character."""
430
+ output = []
431
+ for char in text:
432
+ cp = ord(char)
433
+ if self._is_chinese_char(cp):
434
+ output.append(" ")
435
+ output.append(char)
436
+ output.append(" ")
437
+ else:
438
+ output.append(char)
439
+ return "".join(output)
440
+
441
+ def _is_chinese_char(self, cp):
442
+ """Checks whether CP is the codepoint of a CJK character."""
443
+ # This defines a "chinese character" as anything in the CJK Unicode block:
444
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
445
+ #
446
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
447
+ # despite its name. The modern Korean Hangul alphabet is a different block,
448
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
449
+ # space-separated words, so they are not treated specially and handled
450
+ # like all of the other languages.
451
+ if (
452
+ (cp >= 0x4E00 and cp <= 0x9FFF)
453
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
454
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
455
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
456
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
457
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
458
+ or (cp >= 0xF900 and cp <= 0xFAFF)
459
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
460
+ ): #
461
+ return True
462
+
463
+ return False
464
+
465
+ def _clean_text(self, text):
466
+ """Performs invalid character removal and whitespace cleanup on text."""
467
+ output = []
468
+ for char in text:
469
+ cp = ord(char)
470
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
471
+ continue
472
+ if _is_whitespace(char):
473
+ output.append(" ")
474
+ else:
475
+ output.append(char)
476
+ return "".join(output)
477
+
478
+
479
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
480
+ class WordpieceTokenizer(object):
481
+ """Runs WordPiece tokenization."""
482
+
483
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
484
+ self.vocab = vocab
485
+ self.unk_token = unk_token
486
+ self.max_input_chars_per_word = max_input_chars_per_word
487
+
488
+ def tokenize(self, text):
489
+ """
490
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
491
+ tokenization using the given vocabulary.
492
+
493
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
494
+
495
+ Args:
496
+ text: A single token or whitespace separated tokens. This should have
497
+ already been passed through *BasicTokenizer*.
498
+
499
+ Returns:
500
+ A list of wordpiece tokens.
501
+ """
502
+
503
+ output_tokens = []
504
+ for token in whitespace_tokenize(text):
505
+ chars = list(token)
506
+ if len(chars) > self.max_input_chars_per_word:
507
+ output_tokens.append(self.unk_token)
508
+ continue
509
+
510
+ is_bad = False
511
+ start = 0
512
+ sub_tokens = []
513
+ while start < len(chars):
514
+ end = len(chars)
515
+ cur_substr = None
516
+ while start < end:
517
+ substr = "".join(chars[start:end])
518
+ if start > 0:
519
+ substr = "##" + substr
520
+ if substr in self.vocab:
521
+ cur_substr = substr
522
+ break
523
+ end -= 1
524
+ if cur_substr is None:
525
+ is_bad = True
526
+ break
527
+ sub_tokens.append(cur_substr)
528
+ start = end
529
+
530
+ if is_bad:
531
+ output_tokens.append(self.unk_token)
532
+ else:
533
+ output_tokens.extend(sub_tokens)
534
+ return output_tokens
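As a quick check of the greedy longest-match-first loop above, here is a tiny run of `WordpieceTokenizer` with a made-up vocabulary (the real vocabulary comes from `vocab.txt`):

```python
toy_vocab = {"un", "##aff", "##able", "[UNK]"}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))   # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyz"))         # ['[UNK]'] (no piece of "xyz" is in the vocab)
```

Because matching always starts from the longest candidate substring, the first recognized piece wins even if a shorter split would also be possible.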
venv/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py ADDED
@@ -0,0 +1,200 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for Funnel Transformer."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_funnel import FunnelTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+ _model_names = [
32
+ "small",
33
+ "small-base",
34
+ "medium",
35
+ "medium-base",
36
+ "intermediate",
37
+ "intermediate-base",
38
+ "large",
39
+ "large-base",
40
+ "xlarge",
41
+ "xlarge-base",
42
+ ]
43
+
44
+
45
+ class FunnelTokenizerFast(PreTrainedTokenizerFast):
46
+ r"""
47
+ Construct a "fast" Funnel Transformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
48
+
49
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
50
+ refer to this superclass for more information regarding those methods.
51
+
52
+ Args:
53
+ vocab_file (`str`):
54
+ File containing the vocabulary.
55
+ do_lower_case (`bool`, *optional*, defaults to `True`):
56
+ Whether or not to lowercase the input when tokenizing.
57
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
58
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
59
+ token instead.
60
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
61
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
62
+ sequence classification or for a text and a question for question answering. It is also used as the last
63
+ token of a sequence built with special tokens.
64
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
65
+ The token used for padding, for example when batching sequences of different lengths.
66
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
67
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
68
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
69
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
70
+ The token used for masking values. This is the token used when training this model with masked language
71
+ modeling. This is the token which the model will try to predict.
72
+ clean_text (`bool`, *optional*, defaults to `True`):
73
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
74
+ whitespaces by the classic one.
75
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
76
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
77
+ issue](https://github.com/huggingface/transformers/issues/328)).
78
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
79
+ The beginning of sentence token.
80
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
81
+ The end of sentence token.
82
+ strip_accents (`bool`, *optional*):
83
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
84
+ value for `lowercase` (as in the original BERT).
85
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
86
+ The prefix for subwords.
87
+ """
88
+
89
+ vocab_files_names = VOCAB_FILES_NAMES
90
+ slow_tokenizer_class = FunnelTokenizer
91
+ cls_token_type_id: int = 2
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_file=None,
96
+ tokenizer_file=None,
97
+ do_lower_case=True,
98
+ unk_token="<unk>",
99
+ sep_token="<sep>",
100
+ pad_token="<pad>",
101
+ cls_token="<cls>",
102
+ mask_token="<mask>",
103
+ bos_token="<s>",
104
+ eos_token="</s>",
105
+ clean_text=True,
106
+ tokenize_chinese_chars=True,
107
+ strip_accents=None,
108
+ wordpieces_prefix="##",
109
+ **kwargs,
110
+ ):
111
+ super().__init__(
112
+ vocab_file,
113
+ tokenizer_file=tokenizer_file,
114
+ do_lower_case=do_lower_case,
115
+ unk_token=unk_token,
116
+ sep_token=sep_token,
117
+ pad_token=pad_token,
118
+ cls_token=cls_token,
119
+ mask_token=mask_token,
120
+ bos_token=bos_token,
121
+ eos_token=eos_token,
122
+ clean_text=clean_text,
123
+ tokenize_chinese_chars=tokenize_chinese_chars,
124
+ strip_accents=strip_accents,
125
+ wordpieces_prefix=wordpieces_prefix,
126
+ **kwargs,
127
+ )
128
+
129
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
130
+ if (
131
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
132
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
133
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
134
+ ):
135
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
136
+ normalizer_state["lowercase"] = do_lower_case
137
+ normalizer_state["strip_accents"] = strip_accents
138
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
139
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
140
+
141
+ self.do_lower_case = do_lower_case
142
+
143
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens with BERT->Funnel
144
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
145
+ """
146
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
147
+ adding special tokens. A Funnel sequence has the following format:
148
+
149
+ - single sequence: `[CLS] X [SEP]`
150
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs to which the special tokens will be added.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
160
+ """
161
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
162
+
163
+ if token_ids_1 is not None:
164
+ output += token_ids_1 + [self.sep_token_id]
165
+
166
+ return output
167
+
168
+ def create_token_type_ids_from_sequences(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
170
+ ) -> List[int]:
171
+ """
172
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
173
+ Transformer sequence pair mask has the following format:
174
+
175
+ ```
176
+ 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
177
+ | first sequence | second sequence |
178
+ ```
179
+
180
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
181
+
182
+ Args:
183
+ token_ids_0 (`List[int]`):
184
+ List of IDs.
185
+ token_ids_1 (`List[int]`, *optional*):
186
+ Optional second list of IDs for sequence pairs.
187
+
188
+ Returns:
189
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
190
+ """
191
+ sep = [self.sep_token_id]
192
+ cls = [self.cls_token_id]
193
+ if token_ids_1 is None:
194
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
195
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
196
+
197
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
198
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
199
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
200
+ return tuple(files)
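
One detail worth noting in the file above is `cls_token_type_id = 2`: unlike BERT, the Funnel classifier token gets its own segment id, so a sequence pair yields `2 | 0...0 | 1...1` as in the docstring diagram. A minimal sketch of `create_token_type_ids_from_sequences` with made-up token ids (only the lengths matter):

```python
# Token ids below are hypothetical; the segment-id pattern is what matters.
cls_token_type_id = 2
cls, sep = [101], [102]          # stand-in <cls> / <sep> ids
token_ids_0 = [7, 8, 9]          # first sequence
token_ids_1 = [10, 11]           # second sequence

token_type_ids = (
    len(cls) * [cls_token_type_id]
    + len(token_ids_0 + sep) * [0]
    + len(token_ids_1 + sep) * [1]
)
print(token_type_ids)  # [2, 0, 0, 0, 0, 1, 1, 1]
```
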
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__init__.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
17
+ from ...utils import OptionalDependencyNotAvailable
18
+
19
+
20
+ _import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
21
+
22
+ try:
23
+ if not is_tokenizers_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_gpt_neox"] = [
37
+ "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "GPTNeoXForCausalLM",
39
+ "GPTNeoXForQuestionAnswering",
40
+ "GPTNeoXForSequenceClassification",
41
+ "GPTNeoXForTokenClassification",
42
+ "GPTNeoXLayer",
43
+ "GPTNeoXModel",
44
+ "GPTNeoXPreTrainedModel",
45
+ ]
46
+
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
50
+
51
+ try:
52
+ if not is_tokenizers_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
58
+
59
+ try:
60
+ if not is_torch_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ from .modeling_gpt_neox import (
66
+ GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
67
+ GPTNeoXForCausalLM,
68
+ GPTNeoXForQuestionAnswering,
69
+ GPTNeoXForSequenceClassification,
70
+ GPTNeoXForTokenClassification,
71
+ GPTNeoXLayer,
72
+ GPTNeoXModel,
73
+ GPTNeoXPreTrainedModel,
74
+ )
75
+
76
+
77
+ else:
78
+ import sys
79
+
80
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
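
The `_LazyModule` registration above defers the heavy imports until an attribute is first accessed. A rough sketch of that behaviour, assuming `transformers` and `torch` are installed (the module path follows the file layout shown here):

```python
import sys

import transformers

# The gpt_neox modeling submodule is not loaded just by importing transformers.
print("transformers.models.gpt_neox.modeling_gpt_neox" in sys.modules)  # expected: False

from transformers import GPTNeoXConfig  # resolved lazily; only the config module is imported

_ = transformers.GPTNeoXModel  # first access triggers the real import (needs torch)
print("transformers.models.gpt_neox.modeling_gpt_neox" in sys.modules)  # expected: True
```
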
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.32 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/configuration_gpt_neox.cpython-310.pyc ADDED
Binary file (7.62 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/modeling_gpt_neox.cpython-310.pyc ADDED
Binary file (41.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/tokenization_gpt_neox_fast.cpython-310.pyc ADDED
Binary file (8.46 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/configuration_gpt_neox.py ADDED
@@ -0,0 +1,179 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPTNeoX model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GPTNeoXConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an
30
+ GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the GPTNeoX
32
+ [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50432):
40
+ Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the
41
+ `input_ids` passed when calling [`GPTNeoXModel`].
42
+ hidden_size (`int`, *optional*, defaults to 6144):
43
+ Dimension of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 44):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 64):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 24576):
49
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
53
+ rotary_pct (`float`, *optional*, defaults to 0.25):
54
+ Percentage of hidden dimensions to allocate to rotary embeddings.
55
+ rotary_emb_base (`int`, *optional*, defaults to 10000):
56
+ Base for computing the rotary embedding frequencies.
57
+ attention_dropout (`float`, *optional*, defaults to 0.0):
58
+ The dropout probability applied to the attention scores.
59
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio of (1) the word embeddings, (2) the post-attention hidden states, and (3) the post-mlp
61
+ hidden states.
62
+ classifier_dropout (`float`, *optional*, defaults to 0.1):
63
+ Argument used when doing token classification, used in the model [`GPTNeoXForTokenClassification`].
64
+
65
+ The dropout ratio for the hidden layer.
66
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
72
+ The epsilon used by the layer normalization layers.
73
+ use_cache (`bool`, *optional*, defaults to `True`):
74
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
75
+ relevant if `config.is_decoder=True`.
76
+ use_parallel_residual (`bool`, *optional*, defaults to `True`):
77
+ Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training
78
+ speedup at large scales (e.g. 20B).
79
+ rope_scaling (`Dict`, *optional*):
80
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
81
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
82
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
83
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
84
+ these scaling strategies behave:
85
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
86
+ experimental feature, subject to breaking API changes in future versions.
87
+ attention_bias (`bool`, *optional*, defaults to `True`):
88
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
89
+
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import GPTNeoXConfig, GPTNeoXModel
94
+
95
+ >>> # Initializing a GPTNeoX gpt-neox-20b style configuration
96
+ >>> configuration = GPTNeoXConfig()
97
+
98
+ >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration
99
+ >>> model = GPTNeoXModel(configuration) # doctest: +SKIP
100
+
101
+ >>> # Accessing the model configuration
102
+ >>> configuration = model.config # doctest: +SKIP
103
+ ```"""
104
+
105
+ model_type = "gpt_neox"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=50432,
111
+ hidden_size=6144,
112
+ num_hidden_layers=44,
113
+ num_attention_heads=64,
114
+ intermediate_size=24576,
115
+ hidden_act="gelu",
116
+ rotary_pct=0.25,
117
+ rotary_emb_base=10000,
118
+ attention_dropout=0.0,
119
+ hidden_dropout=0.0,
120
+ classifier_dropout=0.1,
121
+ max_position_embeddings=2048,
122
+ initializer_range=0.02,
123
+ layer_norm_eps=1e-5,
124
+ use_cache=True,
125
+ bos_token_id=0,
126
+ eos_token_id=2,
127
+ tie_word_embeddings=False,
128
+ use_parallel_residual=True,
129
+ rope_scaling=None,
130
+ attention_bias=True,
131
+ **kwargs,
132
+ ):
133
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
134
+ self.vocab_size = vocab_size
135
+ self.max_position_embeddings = max_position_embeddings
136
+ self.hidden_size = hidden_size
137
+ self.num_hidden_layers = num_hidden_layers
138
+ self.num_attention_heads = num_attention_heads
139
+ self.intermediate_size = intermediate_size
140
+ self.hidden_act = hidden_act
141
+ self.rotary_pct = rotary_pct
142
+ self.rotary_emb_base = rotary_emb_base
143
+ self.attention_dropout = attention_dropout
144
+ self.hidden_dropout = hidden_dropout
145
+ self.classifier_dropout = classifier_dropout
146
+ self.initializer_range = initializer_range
147
+ self.layer_norm_eps = layer_norm_eps
148
+ self.use_cache = use_cache
149
+ self.tie_word_embeddings = tie_word_embeddings
150
+ self.use_parallel_residual = use_parallel_residual
151
+ self.rope_scaling = rope_scaling
152
+ self.attention_bias = attention_bias
153
+ self._rope_scaling_validation()
154
+
155
+ if self.hidden_size % self.num_attention_heads != 0:
156
+ raise ValueError(
157
+ "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
158
+ )
159
+
160
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
161
+ def _rope_scaling_validation(self):
162
+ """
163
+ Validate the `rope_scaling` configuration.
164
+ """
165
+ if self.rope_scaling is None:
166
+ return
167
+
168
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
169
+ raise ValueError(
170
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
171
+ )
172
+ rope_scaling_type = self.rope_scaling.get("type", None)
173
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
174
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
175
+ raise ValueError(
176
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
177
+ )
178
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
179
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
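
A small usage sketch of this configuration and its `_rope_scaling_validation` check; the tiny sizes are chosen only to keep the object cheap and assume nothing beyond an installed `transformers`:

```python
from transformers import GPTNeoXConfig

# A well-formed rope_scaling dict passes validation.
config = GPTNeoXConfig(
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=256,
    rope_scaling={"type": "linear", "factor": 2.0},
)
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

# An unknown scaling type is rejected by _rope_scaling_validation.
try:
    GPTNeoXConfig(hidden_size=64, num_attention_heads=4, rope_scaling={"type": "ntk", "factor": 2.0})
except ValueError as err:
    print(err)
```
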
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py ADDED
@@ -0,0 +1,1426 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPTNeoX model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+ from torch.nn import functional as F
24
+
25
+ from ...activations import ACT2FN
26
+ from ...file_utils import (
27
+ add_code_sample_docstrings,
28
+ add_start_docstrings,
29
+ add_start_docstrings_to_model_forward,
30
+ replace_return_docstrings,
31
+ )
32
+ from ...modeling_outputs import (
33
+ BaseModelOutputWithPast,
34
+ CausalLMOutputWithPast,
35
+ QuestionAnsweringModelOutput,
36
+ SequenceClassifierOutputWithPast,
37
+ TokenClassifierOutput,
38
+ )
39
+ from ...modeling_utils import PreTrainedModel
40
+ from ...utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging
41
+ from .configuration_gpt_neox import GPTNeoXConfig
42
+
43
+
44
+ if is_flash_attn_2_available():
45
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
46
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM"
52
+ _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neox-20b"
53
+ _CONFIG_FOR_DOC = "GPTNeoXConfig"
54
+
55
+
56
+ from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
60
+ def _get_unpad_data(attention_mask):
61
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
62
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
63
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
64
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
65
+ return (
66
+ indices,
67
+ cu_seqlens,
68
+ max_seqlen_in_batch,
69
+ )
70
+
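
`_get_unpad_data` above simply summarizes the padding mask for the variable-length flash-attention kernels. A minimal sketch of its outputs on a toy mask (plain PyTorch, no flash-attn required):

```python
# 1 marks real tokens, 0 marks padding; the two sequences have lengths 2 and 3.
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 0], [1, 1, 1]])

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)             # tensor([2, 3])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([0, 1, 3, 4, 5])
max_seqlen_in_batch = seqlens_in_batch.max().item()                          # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))  # tensor([0, 2, 5])

print(indices, cu_seqlens, max_seqlen_in_batch)
```
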
71
+
72
+ class GPTNeoXPreTrainedModel(PreTrainedModel):
73
+ """
74
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
75
+ models.
76
+ """
77
+
78
+ config_class = GPTNeoXConfig
79
+ base_model_prefix = "gpt_neox"
80
+ supports_gradient_checkpointing = True
81
+ _no_split_modules = ["GPTNeoXLayer"]
82
+ _skip_keys_device_placement = "past_key_values"
83
+ _supports_flash_attn_2 = True
84
+
85
+ def _init_weights(self, module):
86
+ """Initialize the weights"""
87
+ if isinstance(module, nn.Linear):
88
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
89
+ if module.bias is not None:
90
+ module.bias.data.zero_()
91
+ elif isinstance(module, nn.Embedding):
92
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
93
+ if module.padding_idx is not None:
94
+ module.weight.data[module.padding_idx].zero_()
95
+ elif isinstance(module, nn.LayerNorm):
96
+ module.bias.data.zero_()
97
+ module.weight.data.fill_(1.0)
98
+
99
+
100
+ class GPTNeoXAttention(nn.Module):
101
+ def __init__(self, config):
102
+ super().__init__()
103
+ self.config = config
104
+ self.num_attention_heads = config.num_attention_heads
105
+ self.hidden_size = config.hidden_size
106
+ if self.hidden_size % self.num_attention_heads != 0:
107
+ raise ValueError(
108
+ "The hidden size is not divisible by the number of attention heads! Make sure to update them"
109
+ )
110
+ self.head_size = self.hidden_size // self.num_attention_heads
111
+ self.rotary_ndims = int(self.head_size * config.rotary_pct)
112
+ self._init_bias(config.max_position_embeddings)
113
+
114
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
115
+ self._init_rope()
116
+
117
+ self.norm_factor = self.head_size**-0.5
118
+ self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias)
119
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
120
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
121
+ self.is_causal = True
122
+
123
+ def _init_bias(self, max_positions, device=None):
124
+ self.register_buffer(
125
+ "bias",
126
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
127
+ 1, 1, max_positions, max_positions
128
+ ),
129
+ persistent=False,
130
+ )
131
+ if device is not None:
132
+ self.bias = self.bias.to(device)
133
+
134
+ def _init_rope(self):
135
+ if self.config.rope_scaling is None:
136
+ self.rotary_emb = GPTNeoXRotaryEmbedding(
137
+ self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base
138
+ )
139
+ else:
140
+ scaling_type = self.config.rope_scaling["type"]
141
+ scaling_factor = self.config.rope_scaling["factor"]
142
+ if scaling_type == "linear":
143
+ self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding(
144
+ self.rotary_ndims,
145
+ self.config.max_position_embeddings,
146
+ base=self.config.rotary_emb_base,
147
+ scaling_factor=scaling_factor,
148
+ )
149
+ elif scaling_type == "dynamic":
150
+ self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding(
151
+ self.rotary_ndims,
152
+ self.config.max_position_embeddings,
153
+ base=self.config.rotary_emb_base,
154
+ scaling_factor=scaling_factor,
155
+ )
156
+ else:
157
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
158
+
159
+ def forward(
160
+ self,
161
+ hidden_states: torch.FloatTensor,
162
+ attention_mask: torch.FloatTensor,
163
+ position_ids: torch.LongTensor,
164
+ head_mask: Optional[torch.FloatTensor] = None,
165
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
166
+ use_cache: Optional[bool] = False,
167
+ output_attentions: Optional[bool] = False,
168
+ padding_mask: Optional[torch.Tensor] = None,
169
+ ):
170
+ has_layer_past = layer_past is not None
171
+
172
+ # Compute QKV
173
+ # Attention heads [batch, seq_len, hidden_size]
174
+ # --> [batch, seq_len, (np * 3 * head_size)]
175
+ qkv = self.query_key_value(hidden_states)
176
+
177
+ # [batch, seq_len, (num_heads * 3 * head_size)]
178
+ # --> [batch, seq_len, num_heads, 3 * head_size]
179
+ new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
180
+ qkv = qkv.view(*new_qkv_shape)
181
+
182
+ # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
183
+ query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
184
+ key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
185
+ value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)
186
+
187
+ # Compute rotary embeddings on rotary_ndims
188
+ query_rot = query[..., : self.rotary_ndims]
189
+ query_pass = query[..., self.rotary_ndims :]
190
+ key_rot = key[..., : self.rotary_ndims]
191
+ key_pass = key[..., self.rotary_ndims :]
192
+
193
+ # Compute token offset for rotary embeddings (when decoding)
194
+ seq_len = key.shape[-2]
195
+ if has_layer_past:
196
+ seq_len += layer_past[0].shape[-2]
197
+ cos, sin = self.rotary_emb(value, seq_len=seq_len)
198
+ query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
199
+ query = torch.cat((query, query_pass), dim=-1)
200
+ key = torch.cat((key, key_pass), dim=-1)
201
+
202
+ # Cache QKV values
203
+ if has_layer_past:
204
+ past_key = layer_past[0]
205
+ past_value = layer_past[1]
206
+ key = torch.cat((past_key, key), dim=-2)
207
+ value = torch.cat((past_value, value), dim=-2)
208
+ present = (key, value) if use_cache else None
209
+
210
+ # Compute attention
211
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
212
+
213
+ # Reshape outputs
214
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)
215
+ attn_output = self.dense(attn_output)
216
+
217
+ outputs = (attn_output, present)
218
+ if output_attentions:
219
+ outputs += (attn_weights,)
220
+
221
+ return outputs
222
+
223
+ @classmethod
224
+ def _split_heads(cls, tensor, num_attention_heads, attn_head_size):
225
+ """
226
+ Splits hidden dim into attn_head_size and num_attention_heads
227
+ """
228
+ # tensor: [bs, seq_len, hidden_size]
229
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
230
+ # -> [bs, seq_len, num_attention_heads, attn_head_size]
231
+ tensor = tensor.view(new_shape)
232
+ # -> [bs, num_attention_heads, seq_len, attn_head_size]
233
+ tensor = tensor.permute(0, 2, 1, 3)
234
+ return tensor
235
+
236
+ @classmethod
237
+ def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):
238
+ """
239
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
240
+ """
241
+ # tensor [bs, num_attention_heads, seq_len, attn_head_size]
242
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
243
+ # -> [bs, seq_len, num_attention_heads, attn_head_size]
244
+ tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)
245
+ # -> [bs, seq_len, hidden_size]
246
+ return tensor
247
+
248
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
249
+ # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]
250
+ # compute causal mask from causal mask buffer
251
+ batch_size, num_attention_heads, query_length, attn_head_size = query.size()
252
+ key_length = key.size(-2)
253
+
254
+ # dynamically increase the causal mask with the key length, if needed.
255
+ if key_length > self.bias.shape[-1]:
256
+ self._init_bias(key_length, device=key.device)
257
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
258
+
259
+ query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)
260
+ key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)
261
+ attn_scores = torch.zeros(
262
+ batch_size * num_attention_heads,
263
+ query_length,
264
+ key_length,
265
+ dtype=query.dtype,
266
+ device=key.device,
267
+ )
268
+ attn_scores = torch.baddbmm(
269
+ attn_scores,
270
+ query,
271
+ key.transpose(1, 2),
272
+ beta=1.0,
273
+ alpha=self.norm_factor,
274
+ )
275
+ attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)
276
+
277
+ mask_value = torch.finfo(attn_scores.dtype).min
278
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
279
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
280
+ mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)
281
+ attn_scores = torch.where(causal_mask, attn_scores, mask_value)
282
+
283
+ if attention_mask is not None:
284
+ # Apply the attention mask
285
+ attn_scores = attn_scores + attention_mask
286
+
287
+ attn_weights = nn.functional.softmax(attn_scores, dim=-1)
288
+ attn_weights = attn_weights.to(value.dtype)
289
+
290
+ # Mask heads if we want to
291
+ if head_mask is not None:
292
+ attn_weights = attn_weights * head_mask
293
+
294
+ attn_weights = self.attention_dropout(attn_weights)
295
+
296
+ attn_output = torch.matmul(attn_weights, value)
297
+ return attn_output, attn_weights
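
A minimal sketch of the eager `_attn` path above on random toy tensors: `baddbmm` accumulates `alpha * (Q @ K^T)` into a zero tensor, positions above the diagonal are filled with the dtype's minimum before the softmax, and the weights then mix the values. All sizes and tensors below are made up:

```python
import torch

bs, heads, q_len, k_len, head_size = 1, 2, 4, 4, 8
query = torch.randn(bs * heads, q_len, head_size)
key = torch.randn(bs * heads, k_len, head_size)
value = torch.randn(bs, heads, k_len, head_size)

norm_factor = head_size ** -0.5
attn_scores = torch.baddbmm(
    torch.zeros(bs * heads, q_len, k_len),  # beta * this + alpha * (query @ key^T)
    query,
    key.transpose(1, 2),
    beta=1.0,
    alpha=norm_factor,
).view(bs, heads, q_len, k_len)

causal_mask = torch.tril(torch.ones(q_len, k_len, dtype=torch.bool))
mask_value = torch.finfo(attn_scores.dtype).min
attn_scores = torch.where(causal_mask, attn_scores, torch.tensor(mask_value))

attn_weights = torch.softmax(attn_scores, dim=-1)
attn_output = torch.matmul(attn_weights, value)
print(attn_output.shape)  # torch.Size([1, 2, 4, 8])
```
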
298
+
299
+
300
+ class GPTNeoXFlashAttention2(GPTNeoXAttention):
301
+ """
302
+ GPTNeoX flash attention module. This module inherits from `GPTNeoXAttention`, as the weights of the module stay
303
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
304
+ flash attention and deal with padding tokens in case the input contains any of them.
305
+ """
306
+
307
+ def __init__(self, *args, **kwargs):
308
+ super().__init__(*args, **kwargs)
309
+
310
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
311
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
312
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
313
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
314
+
315
+ def forward(
316
+ self,
317
+ hidden_states: torch.FloatTensor,
318
+ attention_mask: torch.FloatTensor,
319
+ position_ids: torch.LongTensor,
320
+ head_mask: Optional[torch.FloatTensor] = None,
321
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
322
+ use_cache: Optional[bool] = False,
323
+ output_attentions: Optional[bool] = False,
324
+ ):
325
+ has_layer_past = layer_past is not None
326
+
327
+ # Compute QKV
328
+ # Attention heads [batch, seq_len, hidden_size]
329
+ # --> [batch, seq_len, (np * 3 * head_size)]
330
+ qkv = self.query_key_value(hidden_states)
331
+
332
+ # [batch, seq_len, (num_heads * 3 * head_size)]
333
+ # --> [batch, seq_len, num_heads, 3 * head_size]
334
+ new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
335
+ qkv = qkv.view(*new_qkv_shape)
336
+
337
+ # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
338
+ query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
339
+ key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
340
+ value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)
341
+
342
+ query_length = query.shape[-2]
343
+
344
+ # Compute rotary embeddings on rotary_ndims
345
+ query_rot = query[..., : self.rotary_ndims]
346
+ query_pass = query[..., self.rotary_ndims :]
347
+ key_rot = key[..., : self.rotary_ndims]
348
+ key_pass = key[..., self.rotary_ndims :]
349
+
350
+ # Compute token offset for rotary embeddings (when decoding)
351
+ seq_len = key.shape[-2]
352
+ if has_layer_past:
353
+ seq_len += layer_past[0].shape[-2]
354
+ cos, sin = self.rotary_emb(value, seq_len=seq_len)
355
+ query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
356
+ query = torch.cat((query, query_pass), dim=-1)
357
+ key = torch.cat((key, key_pass), dim=-1)
358
+
359
+ # Cache QKV values
360
+ if has_layer_past:
361
+ past_key = layer_past[0]
362
+ past_value = layer_past[1]
363
+ key = torch.cat((past_key, key), dim=-2)
364
+ value = torch.cat((past_value, value), dim=-2)
365
+ present = (key, value) if use_cache else None
366
+
367
+ # GPT-neo-X casts query and key in fp32 to apply rotary embedding in full precision
368
+ target_dtype = value.dtype
369
+ if query.dtype != target_dtype:
370
+ query = query.to(target_dtype)
371
+ if key.dtype != target_dtype:
372
+ key = key.to(target_dtype)
373
+
374
+ # Permute to get the expected shape for Flash Attention
375
+ query = query.permute(0, 2, 1, 3)
376
+ key = key.permute(0, 2, 1, 3)
377
+ value = value.permute(0, 2, 1, 3)
378
+
379
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
380
+ # therefore the input hidden states get silently cast in float32. Hence, we need to
381
+ # cast them back in float16 / bfloat16 just to be sure everything works as expected.
382
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
383
+ input_dtype = query.dtype
384
+ if input_dtype == torch.float32:
385
+ if torch.is_autocast_enabled():
386
+ target_dtype = torch.get_autocast_gpu_dtype()
387
+ # Handle the case where the model is quantized
388
+ elif hasattr(self.config, "_pre_quantization_dtype"):
389
+ target_dtype = self.config._pre_quantization_dtype
390
+ else:
391
+ target_dtype = self.query_key_value.weight.dtype
392
+
393
+ logger.warning_once(
394
+ f"The input hidden states seem to be silently cast in float32; this might be related to"
395
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
396
+ f" {target_dtype}."
397
+ )
398
+
399
+ query = query.to(target_dtype)
400
+ key = key.to(target_dtype)
401
+ value = value.to(target_dtype)
402
+
403
+ attention_dropout = self.config.attention_dropout if self.training else 0.0
404
+
405
+ # Compute attention
406
+ attn_weights = self._flash_attention_forward(
407
+ query, key, value, attention_mask, query_length, dropout=attention_dropout, softmax_scale=self.norm_factor
408
+ )
409
+
410
+ # Reshape outputs
411
+ attn_output = attn_weights.reshape(
412
+ attn_weights.shape[0], attn_weights.shape[1], self.num_attention_heads * self.head_size
413
+ )
414
+ attn_output = self.dense(attn_output)
415
+
416
+ outputs = (attn_output, present)
417
+ if output_attentions:
418
+ outputs += (attn_weights,)
419
+
420
+ return outputs
421
+
422
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
423
+ def _flash_attention_forward(
424
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
425
+ ):
426
+ """
427
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
428
+ first unpad the input, then compute the attention scores and pad the final attention scores.
429
+
430
+ Args:
431
+ query_states (`torch.Tensor`):
432
+ Input query states to be passed to Flash Attention API
433
+ key_states (`torch.Tensor`):
434
+ Input key states to be passed to Flash Attention API
435
+ value_states (`torch.Tensor`):
436
+ Input value states to be passed to Flash Attention API
437
+ attention_mask (`torch.Tensor`):
438
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
439
+ position of padding tokens and 1 for the position of non-padding tokens.
440
+ dropout (`float`):
441
+ Attention dropout
442
+ softmax_scale (`float`, *optional*):
443
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
444
+ """
445
+ if not self._flash_attn_uses_top_left_mask:
446
+ causal = self.is_causal
447
+ else:
448
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
449
+ causal = self.is_causal and query_length != 1
450
+
451
+ # Contains at least one padding token in the sequence
452
+ if attention_mask is not None:
453
+ batch_size = query_states.shape[0]
454
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
455
+ query_states, key_states, value_states, attention_mask, query_length
456
+ )
457
+
458
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
459
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
460
+
461
+ attn_output_unpad = flash_attn_varlen_func(
462
+ query_states,
463
+ key_states,
464
+ value_states,
465
+ cu_seqlens_q=cu_seqlens_q,
466
+ cu_seqlens_k=cu_seqlens_k,
467
+ max_seqlen_q=max_seqlen_in_batch_q,
468
+ max_seqlen_k=max_seqlen_in_batch_k,
469
+ dropout_p=dropout,
470
+ softmax_scale=softmax_scale,
471
+ causal=causal,
472
+ )
473
+
474
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
475
+ else:
476
+ attn_output = flash_attn_func(
477
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
478
+ )
479
+
480
+ return attn_output
481
+
482
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads
483
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
484
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
485
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
486
+
487
+ key_layer = index_first_axis(
488
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
489
+ )
490
+ value_layer = index_first_axis(
491
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
492
+ )
493
+ if query_length == kv_seq_len:
494
+ query_layer = index_first_axis(
495
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k
496
+ )
497
+ cu_seqlens_q = cu_seqlens_k
498
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
499
+ indices_q = indices_k
500
+ elif query_length == 1:
501
+ max_seqlen_in_batch_q = 1
502
+ cu_seqlens_q = torch.arange(
503
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
504
+ ) # There is a memcpy here, that is very bad.
505
+ indices_q = cu_seqlens_q[:-1]
506
+ query_layer = query_layer.squeeze(1)
507
+ else:
508
+ # The -q_len: slice assumes left padding.
509
+ attention_mask = attention_mask[:, -query_length:]
510
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
511
+
512
+ return (
513
+ query_layer,
514
+ key_layer,
515
+ value_layer,
516
+ indices_q,
517
+ (cu_seqlens_q, cu_seqlens_k),
518
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
519
+ )
520
+
521
+
522
+ def attention_mask_func(attention_scores, ltor_mask):
523
+ attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min)
524
+ return attention_scores
525
+
526
+
527
+ class GPTNeoXRotaryEmbedding(nn.Module):
528
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding.__init__
529
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
530
+ super().__init__()
531
+
532
+ self.dim = dim
533
+ self.max_position_embeddings = max_position_embeddings
534
+ self.base = base
535
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
536
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
537
+
538
+ # Build here to make `torch.jit.trace` work.
539
+ self._set_cos_sin_cache(
540
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
541
+ )
542
+
543
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
544
+ self.max_seq_len_cached = seq_len
545
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
546
+
547
+ freqs = torch.outer(t, self.inv_freq)
548
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
549
+ emb = torch.cat((freqs, freqs), dim=-1)
550
+ self.register_buffer("cos_cached", emb.cos(), persistent=False)
551
+ self.register_buffer("sin_cached", emb.sin(), persistent=False)
552
+
553
+ def forward(self, x, seq_len=None):
554
+ # x: [bs, num_attention_heads, seq_len, head_size]
555
+ if seq_len > self.max_seq_len_cached:
556
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
557
+
558
+ return (
559
+ self.cos_cached[:seq_len],
560
+ self.sin_cached[:seq_len],
561
+ )
562
+
563
+
564
+ # copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding.__init__
565
+ # TODO @gante bring compatibility back
566
+ class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding):
567
+ """GPTNeoXRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
568
+
569
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
570
+ self.scaling_factor = scaling_factor
571
+ super().__init__(dim, max_position_embeddings, base, device)
572
+
573
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
574
+ self.max_seq_len_cached = seq_len
575
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
576
+ t = t / self.scaling_factor
577
+
578
+ freqs = torch.outer(t, self.inv_freq)
579
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
580
+ emb = torch.cat((freqs, freqs), dim=-1)
581
+ self.register_buffer("cos_cached", emb.cos(), persistent=False)
582
+ self.register_buffer("sin_cached", emb.sin(), persistent=False)
583
+
584
+
585
+ class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding):
586
+ """GPTNeoXRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
587
+
588
+ # copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding.__init__
589
+ # TODO @gante no longer copied from
590
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
591
+ self.scaling_factor = scaling_factor
592
+ super().__init__(dim, max_position_embeddings, base, device)
593
+
594
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
595
+ self.max_seq_len_cached = seq_len
596
+
597
+ if seq_len > self.max_position_embeddings:
598
+ base = self.base * (
599
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
600
+ ) ** (self.dim / (self.dim - 2))
601
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
602
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
603
+
604
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
605
+
606
+ freqs = torch.outer(t, self.inv_freq)
607
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
608
+ emb = torch.cat((freqs, freqs), dim=-1)
609
+ self.register_buffer("cos_cached", emb.cos(), persistent=False)
610
+ self.register_buffer("sin_cached", emb.sin(), persistent=False)
611
+
612
+
613
+ def rotate_half(x):
614
+ """Rotates half the hidden dims of the input."""
615
+ x1 = x[..., : x.shape[-1] // 2]
616
+ x2 = x[..., x.shape[-1] // 2 :]
617
+ return torch.cat((-x2, x1), dim=-1)
618
+
619
+
620
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
621
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
622
+ """Applies Rotary Position Embedding to the query and key tensors.
623
+
624
+ Args:
625
+ q (`torch.Tensor`): The query tensor.
626
+ k (`torch.Tensor`): The key tensor.
627
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
628
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
629
+ position_ids (`torch.Tensor`):
630
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
631
+ used to pass offsetted position ids when working with a KV-cache.
632
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
633
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
634
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
635
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
636
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
637
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
638
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
639
+ Returns:
640
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
641
+ """
642
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
643
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
644
+ q_embed = (q * cos) + (rotate_half(q) * sin)
645
+ k_embed = (k * cos) + (rotate_half(k) * sin)
646
+ return q_embed, k_embed
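
A minimal, self-contained sketch of `rotate_half` / `apply_rotary_pos_emb` on toy tensors, building the cos/sin caches the same way the rotary embedding classes above do (all sizes are illustrative):

```python
import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

batch, heads, seq_len, head_dim = 1, 2, 4, 8
q = torch.randn(batch, heads, seq_len, head_dim)
k = torch.randn(batch, heads, seq_len, head_dim)

# cos/sin caches indexed by absolute position, shape [seq_len, head_dim]
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
t = torch.arange(seq_len).float()
freqs = torch.outer(t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
cos, sin = emb.cos(), emb.sin()

position_ids = torch.arange(seq_len).unsqueeze(0)  # [1, seq_len]
cos_p = cos[position_ids].unsqueeze(1)              # [1, 1, seq_len, head_dim], broadcasts over heads
sin_p = sin[position_ids].unsqueeze(1)

q_embed = (q * cos_p) + (rotate_half(q) * sin_p)
k_embed = (k * cos_p) + (rotate_half(k) * sin_p)
print(q_embed.shape, k_embed.shape)  # both torch.Size([1, 2, 4, 8])
```
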
647
+
648
+
649
+ class GPTNeoXMLP(nn.Module):
650
+ def __init__(self, config):
651
+ super().__init__()
652
+ self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
653
+ self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
654
+ self.act = ACT2FN[config.hidden_act]
655
+
656
+ def forward(self, hidden_states):
657
+ hidden_states = self.dense_h_to_4h(hidden_states)
658
+ hidden_states = self.act(hidden_states)
659
+ hidden_states = self.dense_4h_to_h(hidden_states)
660
+ return hidden_states
661
+
662
+
663
+ GPT_NEOX_ATTENTION_CLASSES = {
664
+ "eager": GPTNeoXAttention,
665
+ "flash_attention_2": GPTNeoXFlashAttention2,
666
+ }
667
+
668
+
669
+ class GPTNeoXLayer(nn.Module):
670
+ def __init__(self, config):
671
+ super().__init__()
672
+ self.use_parallel_residual = config.use_parallel_residual
673
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
674
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
675
+ self.post_attention_dropout = nn.Dropout(config.hidden_dropout)
676
+ self.post_mlp_dropout = nn.Dropout(config.hidden_dropout)
677
+ self.attention = GPT_NEOX_ATTENTION_CLASSES[config._attn_implementation](config)
678
+ self.mlp = GPTNeoXMLP(config)
679
+
680
+ def forward(
681
+ self,
682
+ hidden_states: Optional[torch.FloatTensor],
683
+ attention_mask: Optional[torch.FloatTensor] = None,
684
+ position_ids: Optional[torch.LongTensor] = None,
685
+ head_mask: Optional[torch.FloatTensor] = None,
686
+ use_cache: Optional[bool] = False,
687
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
688
+ output_attentions: Optional[bool] = False,
689
+ ):
690
+ attention_layer_outputs = self.attention(
691
+ self.input_layernorm(hidden_states),
692
+ attention_mask=attention_mask,
693
+ position_ids=position_ids,
694
+ layer_past=layer_past,
695
+ head_mask=head_mask,
696
+ use_cache=use_cache,
697
+ output_attentions=output_attentions,
698
+ )
699
+ attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights)
700
+ attn_output = self.post_attention_dropout(attn_output)
701
+ outputs = attention_layer_outputs[1:]
702
+
703
+ if self.use_parallel_residual:
704
+ # pseudocode:
705
+ # x = x + attn(ln1(x)) + mlp(ln2(x))
706
+ mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))
707
+ mlp_output = self.post_mlp_dropout(mlp_output)
708
+ hidden_states = mlp_output + attn_output + hidden_states
709
+ else:
710
+ # pseudocode:
711
+ # x = x + attn(ln1(x))
712
+ # x = x + mlp(ln2(x))
713
+ attn_output = attn_output + hidden_states
714
+ mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
715
+ mlp_output = self.post_mlp_dropout(mlp_output)
716
+ hidden_states = mlp_output + attn_output
717
+
718
+ if use_cache:
719
+ outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)
720
+ else:
721
+ outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)
722
+
723
+ return outputs
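
A minimal sketch contrasting the two residual formulations implemented above; the `attn`, `mlp` and layer-norm stand-ins are simple scalings, purely for illustration:

```python
import torch

x = torch.randn(2, 4, 8)
ln1 = ln2 = lambda t: t    # stand-ins for the two LayerNorms
attn = lambda t: 0.1 * t   # stand-in for self-attention
mlp = lambda t: 0.2 * t    # stand-in for the MLP

# use_parallel_residual=True:  x = x + attn(ln1(x)) + mlp(ln2(x))
parallel = x + attn(ln1(x)) + mlp(ln2(x))

# use_parallel_residual=False: x = x + attn(ln1(x)); x = x + mlp(ln2(x))
h = x + attn(ln1(x))
sequential = h + mlp(ln2(h))

print(torch.allclose(parallel, sequential))  # False in general: the MLP sees different inputs
```
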
724
+
725
+
726
+ GPT_NEOX_START_DOCSTRING = r"""
727
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
728
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
729
+ behavior.
730
+
731
+ Parameters:
732
+ config ([`~GPTNeoXConfig`]): Model configuration class with all the parameters of the model.
733
+ Initializing with a config file does not load the weights associated with the model, only the
734
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
735
+ """
736
+
737
+ GPT_NEOX_INPUTS_DOCSTRING = r"""
738
+ Args:
739
+ input_ids (`torch.LongTensor` of shape `({0})`):
740
+ Indices of input sequence tokens in the vocabulary.
741
+
742
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
743
+ [`PreTrainedTokenizer.__call__`] for details.
744
+
745
+ [What are input IDs?](../glossary#input-ids)
746
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
747
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
748
+
749
+ - 1 for tokens that are **not masked**,
750
+ - 0 for tokens that are **masked**.
751
+
752
+ [What are attention masks?](../glossary#attention-mask)
753
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
754
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
755
+ config.n_positions - 1]`.
756
+
757
+ [What are position IDs?](../glossary#position-ids)
758
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
759
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
760
+
761
+ - 1 indicates the head is **not masked**,
762
+ - 0 indicates the head is **masked**.
763
+
764
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
765
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
766
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
767
+ model's internal embedding lookup matrix.
768
+ output_attentions (`bool`, *optional*):
769
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
770
+ tensors for more detail.
771
+ output_hidden_states (`bool`, *optional*):
772
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
773
+ more detail.
774
+ return_dict (`bool`, *optional*):
775
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
776
+ """
777
+
778
+
779
+ @add_start_docstrings(
780
+ "The bare GPTNeoX Model transformer outputting raw hidden-states without any specific head on top.",
781
+ GPT_NEOX_START_DOCSTRING,
782
+ )
783
+ class GPTNeoXModel(GPTNeoXPreTrainedModel):
784
+ def __init__(self, config):
785
+ super().__init__(config)
786
+ self.config = config
787
+
788
+ self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)
789
+ self.emb_dropout = nn.Dropout(config.hidden_dropout)
790
+ self.layers = nn.ModuleList([GPTNeoXLayer(config) for _ in range(config.num_hidden_layers)])
791
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
792
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
793
+
794
+ self.gradient_checkpointing = False
795
+
796
+ # Initialize weights and apply final processing
797
+ self.post_init()
798
+
799
+ def get_input_embeddings(self):
800
+ return self.embed_in
801
+
802
+ def set_input_embeddings(self, value):
803
+ self.embed_in = value
804
+
805
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
806
+ @add_code_sample_docstrings(
807
+ checkpoint=_CHECKPOINT_FOR_DOC,
808
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
809
+ output_type=BaseModelOutputWithPast,
810
+ config_class=_CONFIG_FOR_DOC,
811
+ )
812
+ def forward(
813
+ self,
814
+ input_ids: Optional[torch.LongTensor] = None,
815
+ attention_mask: Optional[torch.FloatTensor] = None,
816
+ position_ids: Optional[torch.LongTensor] = None,
817
+ head_mask: Optional[torch.FloatTensor] = None,
818
+ inputs_embeds: Optional[torch.FloatTensor] = None,
819
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
820
+ use_cache: Optional[bool] = None,
821
+ output_attentions: Optional[bool] = None,
822
+ output_hidden_states: Optional[bool] = None,
823
+ return_dict: Optional[bool] = None,
824
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
825
+ r"""
826
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
827
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
828
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
829
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
830
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
831
+ use_cache (`bool`, *optional*):
832
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
833
+ `past_key_values`).
834
+ """
835
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
836
+ output_hidden_states = (
837
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
838
+ )
839
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
840
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
841
+
842
+ if input_ids is not None and inputs_embeds is not None:
843
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
844
+ elif input_ids is not None:
845
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
846
+ input_shape = input_ids.size()
847
+ elif inputs_embeds is not None:
848
+ input_shape = inputs_embeds.size()[:-1]
849
+ else:
850
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
851
+
852
+ batch_size, seq_length = input_shape
853
+
854
+ if past_key_values is None:
855
+ past_length = 0
856
+ past_key_values = tuple([None] * self.config.num_hidden_layers)
857
+ else:
858
+ past_length = past_key_values[0][0].size(-2)
859
+
860
+ if position_ids is None:
861
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
862
+ position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device)
863
+ position_ids = position_ids.unsqueeze(0)
864
+
865
+ # Attention mask.
866
+ if attention_mask is not None:
867
+ assert batch_size > 0, "batch_size has to be defined and > 0"
868
+ attention_mask = attention_mask.view(batch_size, -1)
869
+ if self._use_flash_attention_2:
870
+ attention_mask = attention_mask if 0 in attention_mask else None
871
+ else:
872
+ # We create a 3D attention mask from a 2D tensor mask.
873
+ # Sizes are [batch_size, 1, 1, to_seq_length]
874
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
875
+ # this attention mask is simpler than the triangular masking of causal attention
876
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
877
+ attention_mask = attention_mask[:, None, None, :]
878
+
879
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
880
+ # masked positions, this operation will create a tensor which is 0.0 for
881
+ # positions we want to attend and the dtype's smallest value for masked positions.
882
+ # Since we are adding it to the raw scores before the softmax, this is
883
+ # effectively the same as removing these entirely.
884
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
885
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
886
+
887
+ # Prepare head mask if needed
888
+ # 1.0 in head_mask indicate we keep the head
889
+ # attention_probs has shape bsz x n_heads x N x N
890
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
891
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
892
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
893
+
894
+ if inputs_embeds is None:
895
+ inputs_embeds = self.embed_in(input_ids)
896
+
897
+ hidden_states = self.emb_dropout(inputs_embeds)
898
+
899
+ if self.gradient_checkpointing and self.training:
900
+ if use_cache:
901
+ logger.warning(
902
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
903
+ )
904
+ use_cache = False
905
+
906
+ presents = () if use_cache else None
907
+ all_attentions = () if output_attentions else None
908
+ all_hidden_states = () if output_hidden_states else None
909
+ for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
910
+ if output_hidden_states:
911
+ all_hidden_states = all_hidden_states + (hidden_states,)
912
+
913
+ if self.gradient_checkpointing and self.training:
914
+ outputs = self._gradient_checkpointing_func(
915
+ layer.__call__,
916
+ hidden_states,
917
+ attention_mask,
918
+ position_ids,
919
+ head_mask[i],
920
+ use_cache,
921
+ None,
922
+ output_attentions,
923
+ )
924
+ else:
925
+ outputs = layer(
926
+ hidden_states,
927
+ attention_mask=attention_mask,
928
+ position_ids=position_ids,
929
+ head_mask=head_mask[i],
930
+ layer_past=layer_past,
931
+ use_cache=use_cache,
932
+ output_attentions=output_attentions,
933
+ )
934
+ hidden_states = outputs[0]
935
+ if use_cache is True:
936
+ presents = presents + (outputs[1],)
937
+ if output_attentions:
938
+ all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
939
+
940
+ hidden_states = self.final_layer_norm(hidden_states)
941
+ # Add last hidden state
942
+ if output_hidden_states:
943
+ all_hidden_states = all_hidden_states + (hidden_states,)
944
+
945
+ if not return_dict:
946
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
947
+
948
+ return BaseModelOutputWithPast(
949
+ last_hidden_state=hidden_states,
950
+ past_key_values=presents,
951
+ hidden_states=all_hidden_states,
952
+ attentions=all_attentions,
953
+ )
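The comments in `forward` above describe how a 2D padding mask is expanded into a broadcastable additive mask. A standalone sketch of that transformation in plain PyTorch, with a made-up mask:

```python
# Sketch of the additive attention-mask construction described in the comments above.
import torch

dtype = torch.float16
attention_mask = torch.tensor([[1, 1, 1, 0]])          # (batch_size, to_seq_length); 0 marks padding
extended = attention_mask[:, None, None, :].to(dtype)  # (batch_size, 1, 1, to_seq_length)
extended = (1.0 - extended) * torch.finfo(dtype).min   # 0 where attended, large negative where masked
print(extended)
# Added to the raw attention scores before softmax, the large negative values
# effectively remove the masked positions from the attention distribution.
```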
954
+
955
+
956
+ @add_start_docstrings(
957
+ """GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.""", GPT_NEOX_START_DOCSTRING
958
+ )
959
+ class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):
960
+ _tied_weights_keys = ["embed_out.weight"]
961
+
962
+ def __init__(self, config):
963
+ super().__init__(config)
964
+
965
+ self.gpt_neox = GPTNeoXModel(config)
966
+ self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
967
+
968
+ # Initialize weights and apply final processing
969
+ self.post_init()
970
+
971
+ def get_output_embeddings(self):
972
+ return self.embed_out
973
+
974
+ def set_output_embeddings(self, new_embeddings):
975
+ self.embed_out = new_embeddings
976
+
977
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
978
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
979
+ def forward(
980
+ self,
981
+ input_ids: Optional[torch.LongTensor] = None,
982
+ attention_mask: Optional[torch.FloatTensor] = None,
983
+ position_ids: Optional[torch.LongTensor] = None,
984
+ inputs_embeds: Optional[torch.FloatTensor] = None,
985
+ head_mask: Optional[torch.FloatTensor] = None,
986
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
987
+ labels: Optional[torch.LongTensor] = None,
988
+ use_cache: Optional[bool] = None,
989
+ output_attentions: Optional[bool] = None,
990
+ output_hidden_states: Optional[bool] = None,
991
+ return_dict: Optional[bool] = None,
992
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
993
+ r"""
994
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
995
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
996
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
997
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are
998
+ only required when the model is used as a decoder in a Sequence to Sequence model.
999
+
1000
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
1001
+ `past_key_values` input) to speed up sequential decoding.
1002
+
1003
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1004
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1005
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1006
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1007
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1008
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1009
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1010
+ use_cache (`bool`, *optional*):
1011
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1012
+ `past_key_values`).
1013
+
1014
+ Returns:
1015
+
1016
+ Example:
1017
+
1018
+ ```python
1019
+ >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig
1020
+ >>> import torch
1021
+
1022
+ >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
1023
+ >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b")
1024
+ >>> config.is_decoder = True
1025
+ >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config)
1026
+
1027
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1028
+ >>> outputs = model(**inputs)
1029
+
1030
+ >>> prediction_logits = outputs.logits
1031
+ ```"""
1032
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1033
+
1034
+ outputs = self.gpt_neox(
1035
+ input_ids,
1036
+ attention_mask=attention_mask,
1037
+ position_ids=position_ids,
1038
+ head_mask=head_mask,
1039
+ inputs_embeds=inputs_embeds,
1040
+ past_key_values=past_key_values,
1041
+ use_cache=use_cache,
1042
+ output_attentions=output_attentions,
1043
+ output_hidden_states=output_hidden_states,
1044
+ return_dict=return_dict,
1045
+ )
1046
+
1047
+ hidden_states = outputs[0]
1048
+ lm_logits = self.embed_out(hidden_states)
1049
+
1050
+ lm_loss = None
1051
+ if labels is not None:
1052
+ # move labels to correct device to enable model parallelism
1053
+ labels = labels.to(lm_logits.device)
1054
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1055
+ shift_logits = lm_logits[:, :-1, :].contiguous()
1056
+ labels = labels[:, 1:].contiguous()
1057
+ loss_fct = CrossEntropyLoss()
1058
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))
1059
+
1060
+ if not return_dict:
1061
+ output = (lm_logits,) + outputs[1:]
1062
+ return ((lm_loss,) + output) if lm_loss is not None else output
1063
+
1064
+ return CausalLMOutputWithPast(
1065
+ loss=lm_loss,
1066
+ logits=lm_logits,
1067
+ past_key_values=outputs.past_key_values,
1068
+ hidden_states=outputs.hidden_states,
1069
+ attentions=outputs.attentions,
1070
+ )
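The loss above shifts logits and labels by one position so that each position is trained to predict the next token. A small self-contained sketch of that shift with made-up shapes:

```python
# Sketch of the next-token label shift used in the causal LM loss above.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
lm_logits = torch.randn(2, 5, vocab_size)         # (batch, seq, vocab)
labels = torch.randint(0, vocab_size, (2, 5))     # usually just the input_ids

shift_logits = lm_logits[:, :-1, :].contiguous()  # drop the last position
shift_labels = labels[:, 1:].contiguous()         # drop the first token

loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)
```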
1071
+
1072
+ def prepare_inputs_for_generation(
1073
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1074
+ ):
1075
+ input_shape = input_ids.shape
1076
+ # cut decoder_input_ids if past is used
1077
+ if past_key_values is not None:
1078
+ past_length = past_key_values[0][0].shape[2]
1079
+
1080
+ # Some generation methods already pass only the last input ID
1081
+ if input_ids.shape[1] > past_length:
1082
+ remove_prefix_length = past_length
1083
+ else:
1084
+ # Default to old behavior: keep only final ID
1085
+ remove_prefix_length = input_ids.shape[1] - 1
1086
+
1087
+ input_ids = input_ids[:, remove_prefix_length:]
1088
+
1089
+ position_ids = kwargs.get("position_ids", None)
1090
+ if attention_mask is not None and position_ids is None:
1091
+ # create position_ids on the fly for batch generation
1092
+ position_ids = attention_mask.long().cumsum(-1) - 1
1093
+ position_ids.masked_fill_(attention_mask == 0, 1)
1094
+ if past_key_values:
1095
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1096
+
1097
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1098
+ if attention_mask is None:
1099
+ attention_mask = input_ids.new_ones(input_shape)
1100
+
1101
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1102
+ if inputs_embeds is not None and past_key_values is None:
1103
+ model_inputs = {"inputs_embeds": inputs_embeds}
1104
+ else:
1105
+ model_inputs = {"input_ids": input_ids}
1106
+ model_inputs.update(
1107
+ {
1108
+ "attention_mask": attention_mask,
1109
+ "past_key_values": past_key_values,
1110
+ "position_ids": position_ids,
1111
+ }
1112
+ )
1113
+
1114
+ return model_inputs
1115
+
1116
+ def _reorder_cache(self, past_key_values, beam_idx):
1117
+ reordered_past = ()
1118
+ for layer_past in past_key_values:
1119
+ reordered_past += (
1120
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1121
+ + layer_past[2:],
1122
+ )
1123
+ return reordered_past
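`prepare_inputs_for_generation` and `_reorder_cache` exist so that `generate()` can reuse the key/value cache. A hedged sketch of the same idea written out as a manual greedy loop; the checkpoint name is illustrative and any GPT-NeoX checkpoint should behave the same way:

```python
# Sketch of greedy decoding with the key/value cache managed above.
import torch
from transformers import AutoTokenizer, GPTNeoXForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-70m").eval()

generated = tokenizer("The capital of France is", return_tensors="pt").input_ids
past_key_values = None

with torch.no_grad():
    for _ in range(10):
        # After the first step, only the newest token is fed; the rest comes from the cache.
        step_input = generated if past_key_values is None else generated[:, -1:]
        out = model(step_input, past_key_values=past_key_values, use_cache=True)
        past_key_values = out.past_key_values
        next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
        generated = torch.cat([generated, next_token], dim=-1)

print(tokenizer.decode(generated[0]))
```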
1124
+
1125
+
1126
+ @add_start_docstrings(
1127
+ """
1128
+ The GPTNeoX Model transformer with a sequence classification head on top (linear layer).
1129
+
1130
+ [`GPTNeoXForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1131
+ (e.g. GPT-1) do.
1132
+
1133
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1134
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1135
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1136
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1137
+ each row of the batch).
1138
+ """,
1139
+ GPT_NEOX_START_DOCSTRING,
1140
+ )
1141
+ class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel):
1142
+ def __init__(self, config):
1143
+ super().__init__(config)
1144
+ self.num_labels = config.num_labels
1145
+ self.gpt_neox = GPTNeoXModel(config)
1146
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1147
+
1148
+ # Initialize weights and apply final processing
1149
+ self.post_init()
1150
+
1151
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
1152
+ @add_code_sample_docstrings(
1153
+ checkpoint=_CHECKPOINT_FOR_DOC,
1154
+ output_type=SequenceClassifierOutputWithPast,
1155
+ config_class=_CONFIG_FOR_DOC,
1156
+ )
1157
+ def forward(
1158
+ self,
1159
+ input_ids: Optional[torch.LongTensor] = None,
1160
+ attention_mask: Optional[torch.FloatTensor] = None,
1161
+ position_ids: Optional[torch.LongTensor] = None,
1162
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1163
+ head_mask: Optional[torch.FloatTensor] = None,
1164
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1165
+ labels: Optional[torch.LongTensor] = None,
1166
+ use_cache: Optional[bool] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
1171
+ r"""
1172
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1173
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1174
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1175
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1176
+ """
1177
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1178
+
1179
+ outputs = self.gpt_neox(
1180
+ input_ids,
1181
+ attention_mask=attention_mask,
1182
+ position_ids=position_ids,
1183
+ head_mask=head_mask,
1184
+ inputs_embeds=inputs_embeds,
1185
+ past_key_values=past_key_values,
1186
+ use_cache=use_cache,
1187
+ output_attentions=output_attentions,
1188
+ output_hidden_states=output_hidden_states,
1189
+ return_dict=return_dict,
1190
+ )
1191
+ hidden_states = outputs[0]
1192
+ logits = self.score(hidden_states)
1193
+
1194
+ if input_ids is not None:
1195
+ batch_size, sequence_length = input_ids.shape[:2]
1196
+ else:
1197
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1198
+
1199
+ if self.config.pad_token_id is None and batch_size != 1:
1200
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1201
+ if self.config.pad_token_id is None:
1202
+ sequence_lengths = -1
1203
+ else:
1204
+ if input_ids is not None:
1205
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1206
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1207
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1208
+ sequence_lengths = sequence_lengths.to(logits.device)
1209
+ else:
1210
+ sequence_lengths = -1
1211
+ logger.warning(
1212
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1213
+ "unexpected if using padding tokens in conjunction with `inputs_embeds`."
1214
+ )
1215
+
1216
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1217
+
1218
+ loss = None
1219
+ if labels is not None:
1220
+ labels = labels.to(logits.device)
1221
+ if self.config.problem_type is None:
1222
+ if self.num_labels == 1:
1223
+ self.config.problem_type = "regression"
1224
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1225
+ self.config.problem_type = "single_label_classification"
1226
+ else:
1227
+ self.config.problem_type = "multi_label_classification"
1228
+
1229
+ if self.config.problem_type == "regression":
1230
+ loss_fct = MSELoss()
1231
+ if self.num_labels == 1:
1232
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1233
+ else:
1234
+ loss = loss_fct(pooled_logits, labels)
1235
+ elif self.config.problem_type == "single_label_classification":
1236
+ loss_fct = CrossEntropyLoss()
1237
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1238
+ elif self.config.problem_type == "multi_label_classification":
1239
+ loss_fct = BCEWithLogitsLoss()
1240
+ loss = loss_fct(pooled_logits, labels)
1241
+ if not return_dict:
1242
+ output = (pooled_logits,) + outputs[1:]
1243
+ return ((loss,) + output) if loss is not None else output
1244
+
1245
+ return SequenceClassifierOutputWithPast(
1246
+ loss=loss,
1247
+ logits=pooled_logits,
1248
+ past_key_values=outputs.past_key_values,
1249
+ hidden_states=outputs.hidden_states,
1250
+ attentions=outputs.attentions,
1251
+ )
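The pad-token `argmax`/modulo trick above finds the last non-padding token per row without reverse indexing (for ONNX compatibility). A standalone sketch with a made-up pad token id:

```python
# Sketch of the ONNX-friendly "last non-padding token" lookup used above.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 3, 4, 2]])       # second row has no padding

# argmax returns the first pad position (or 0 if there is none); subtracting 1 gives the
# last real token, and the modulo maps the no-padding case to the final position.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)                            # tensor([2, 4])

logits = torch.randn(2, 5, 3)                      # (batch, seq, num_labels)
pooled = logits[torch.arange(2), sequence_lengths] # (batch, num_labels)
print(pooled.shape)
```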
1252
+
1253
+
1254
+ class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel):
1255
+ def __init__(self, config):
1256
+ super().__init__(config)
1257
+ self.num_labels = config.num_labels
1258
+
1259
+ self.gpt_neox = GPTNeoXModel(config)
1260
+ self.dropout = nn.Dropout(config.classifier_dropout)
1261
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1262
+
1263
+ # Initialize weights and apply final processing
1264
+ self.post_init()
1265
+
1266
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
1267
+ @add_code_sample_docstrings(
1268
+ checkpoint="LarsJonasson/pythia-410m-deduped-sft-swedish",
1269
+ output_type=TokenClassifierOutput,
1270
+ config_class=_CONFIG_FOR_DOC,
1271
+ expected_loss=0.25,
1272
+ )
1273
+ def forward(
1274
+ self,
1275
+ input_ids: Optional[torch.LongTensor] = None,
1276
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1277
+ attention_mask: Optional[torch.FloatTensor] = None,
1278
+ token_type_ids: Optional[torch.LongTensor] = None,
1279
+ position_ids: Optional[torch.LongTensor] = None,
1280
+ head_mask: Optional[torch.FloatTensor] = None,
1281
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1282
+ labels: Optional[torch.LongTensor] = None,
1283
+ use_cache: Optional[bool] = None,
1284
+ output_attentions: Optional[bool] = None,
1285
+ output_hidden_states: Optional[bool] = None,
1286
+ return_dict: Optional[bool] = None,
1287
+ ) -> Union[Tuple, TokenClassifierOutput]:
1288
+ r"""
1289
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1290
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1293
+ """
1294
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1295
+
1296
+ outputs = self.gpt_neox(
1297
+ input_ids,
1298
+ past_key_values=past_key_values,
1299
+ attention_mask=attention_mask,
1300
+ position_ids=position_ids,
1301
+ head_mask=head_mask,
1302
+ inputs_embeds=inputs_embeds,
1303
+ use_cache=use_cache,
1304
+ output_attentions=output_attentions,
1305
+ output_hidden_states=output_hidden_states,
1306
+ return_dict=return_dict,
1307
+ )
1308
+
1309
+ hidden_states = outputs[0]
1310
+ hidden_states = self.dropout(hidden_states)
1311
+ logits = self.classifier(hidden_states)
1312
+
1313
+ loss = None
1314
+ if labels is not None:
1315
+ labels = labels.to(logits.device)
1316
+ loss_fct = CrossEntropyLoss()
1317
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1318
+
1319
+ if not return_dict:
1320
+ output = (logits,) + outputs[2:]
1321
+ return ((loss,) + output) if loss is not None else output
1322
+
1323
+ return TokenClassifierOutput(
1324
+ loss=loss,
1325
+ logits=logits,
1326
+ hidden_states=outputs.hidden_states,
1327
+ attentions=outputs.attentions,
1328
+ )
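The token-classification loss above flattens logits and labels before `CrossEntropyLoss`; label values of `-100` (commonly used for padding or special tokens) are ignored by default. A small sketch with made-up shapes:

```python
# Sketch of the flattened token-classification loss; -100 labels are ignored by default.
import torch
from torch.nn import CrossEntropyLoss

num_labels = 4
logits = torch.randn(2, 6, num_labels)             # (batch, seq, num_labels)
labels = torch.tensor([[1, 0, 2, 3, -100, -100],
                       [0, 0, 1, -100, -100, -100]])

loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss)
```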
1329
+
1330
+
1331
+ @add_start_docstrings(
1332
+ """
1333
+ The GPT-NeoX Model transformer with a span classification head on top for extractive question-answering tasks like
1334
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1335
+ """,
1336
+ GPT_NEOX_START_DOCSTRING,
1337
+ )
1338
+ class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel):
1339
+ def __init__(self, config):
1340
+ super().__init__(config)
1341
+ self.num_labels = config.num_labels
1342
+ self.gpt_neox = GPTNeoXModel(config)
1343
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1344
+
1345
+ # Initialize weights and apply final processing
1346
+ self.post_init()
1347
+
1348
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1349
+ @add_code_sample_docstrings(
1350
+ checkpoint=_CHECKPOINT_FOR_DOC,
1351
+ output_type=QuestionAnsweringModelOutput,
1352
+ config_class=_CONFIG_FOR_DOC,
1353
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1354
+ )
1355
+ def forward(
1356
+ self,
1357
+ input_ids: Optional[torch.LongTensor] = None,
1358
+ attention_mask: Optional[torch.FloatTensor] = None,
1359
+ token_type_ids: Optional[torch.LongTensor] = None,
1360
+ position_ids: Optional[torch.LongTensor] = None,
1361
+ head_mask: Optional[torch.FloatTensor] = None,
1362
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1363
+ start_positions: Optional[torch.LongTensor] = None,
1364
+ end_positions: Optional[torch.LongTensor] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ return_dict: Optional[bool] = None,
1368
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1369
+ r"""
1370
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1371
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1372
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1373
+ are not taken into account for computing the loss.
1374
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1377
+ are not taken into account for computing the loss.
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ outputs = self.gpt_neox(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ position_ids=position_ids,
1385
+ head_mask=head_mask,
1386
+ inputs_embeds=inputs_embeds,
1387
+ output_attentions=output_attentions,
1388
+ output_hidden_states=output_hidden_states,
1389
+ return_dict=return_dict,
1390
+ )
1391
+
1392
+ sequence_output = outputs[0]
1393
+
1394
+ logits = self.qa_outputs(sequence_output)
1395
+ start_logits, end_logits = logits.split(1, dim=-1)
1396
+ start_logits = start_logits.squeeze(-1).contiguous()
1397
+ end_logits = end_logits.squeeze(-1).contiguous()
1398
+
1399
+ total_loss = None
1400
+ if start_positions is not None and end_positions is not None:
1401
+ # If we are on multi-GPU, splitting adds a dimension
1402
+ if len(start_positions.size()) > 1:
1403
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1404
+ if len(end_positions.size()) > 1:
1405
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1406
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1407
+ ignored_index = start_logits.size(1)
1408
+ start_positions = start_positions.clamp(0, ignored_index)
1409
+ end_positions = end_positions.clamp(0, ignored_index)
1410
+
1411
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1412
+ start_loss = loss_fct(start_logits, start_positions)
1413
+ end_loss = loss_fct(end_logits, end_positions)
1414
+ total_loss = (start_loss + end_loss) / 2
1415
+
1416
+ if not return_dict:
1417
+ output = (start_logits, end_logits) + outputs[2:]
1418
+ return ((total_loss,) + output) if total_loss is not None else output
1419
+
1420
+ return QuestionAnsweringModelOutput(
1421
+ loss=total_loss,
1422
+ start_logits=start_logits,
1423
+ end_logits=end_logits,
1424
+ hidden_states=outputs.hidden_states,
1425
+ attentions=outputs.attentions,
1426
+ )
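A sketch of how the start/end logits produced above are typically turned into a predicted answer span; real QA pipelines score all valid (start, end) pairs, while this keeps only the naive argmax:

```python
# Sketch of span extraction from the two logits produced by `qa_outputs`.
import torch

seq_len = 8
logits = torch.randn(1, seq_len, 2)                 # what the QA head produces
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)

start_index = int(start_logits.argmax(-1))
end_index = int(end_logits.argmax(-1))
if end_index < start_index:                         # naive fix-up only; see note above
    start_index, end_index = end_index, start_index
print(start_index, end_index)                       # token indices delimiting the answer span
```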
venv/lib/python3.10/site-packages/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py ADDED
@@ -0,0 +1,243 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for GPTNeoX."""
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import pre_tokenizers, processors
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
28
+
29
+
30
+ class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
31
+ """
32
+ Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
33
+ Byte-Pair-Encoding.
34
+
35
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
36
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
37
+
38
+ ```python
39
+ >>> from transformers import GPTNeoXTokenizerFast
40
+
41
+ >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("openai-community/gpt2")
42
+ >>> tokenizer("Hello world")["input_ids"]
43
+ [15496, 995]
44
+
45
+ >>> tokenizer(" Hello world")["input_ids"]
46
+ [18435, 995]
47
+ ```
48
+
49
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
50
+ the model was not pretrained this way, it might yield a decrease in performance.
51
+
52
+ <Tip>
53
+
54
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
55
+
56
+ </Tip>
57
+
58
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
59
+ refer to this superclass for more information regarding those methods.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ Path to the vocabulary file.
64
+ merges_file (`str`):
65
+ Path to the merges file.
66
+ errors (`str`, *optional*, defaults to `"replace"`):
67
+ Paradigm to follow when decoding bytes to UTF-8. See
68
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
69
+ unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
73
+ The beginning of sequence token.
74
+ eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
75
+ The end of sequence token.
76
+ pad_token (`str`, *optional*):
77
+ Token for padding a sequence.
78
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
79
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
80
+ other word. (The GPTNeoX tokenizer detects the beginning of words by the preceding space.)
81
+ add_bos_token (`bool`, *optional*, defaults to `False`):
82
+ Whether or not to add a `bos_token` at the start of sequences.
83
+ add_eos_token (`bool`, *optional*, defaults to `False`):
84
+ Whether or not to add an `eos_token` at the end of sequences.
85
+ trim_offsets (`bool`, *optional*, defaults to `True`):
86
+ Whether or not the post-processing step should trim offsets to avoid including whitespaces.
87
+ """
88
+
89
+ vocab_files_names = VOCAB_FILES_NAMES
90
+ model_input_names = ["input_ids", "attention_mask"]
91
+
92
+ def __init__(
93
+ self,
94
+ vocab_file=None,
95
+ merges_file=None,
96
+ tokenizer_file=None,
97
+ unk_token="<|endoftext|>",
98
+ bos_token="<|endoftext|>",
99
+ eos_token="<|endoftext|>",
100
+ pad_token=None,
101
+ add_bos_token=False,
102
+ add_eos_token=False,
103
+ add_prefix_space=False,
104
+ **kwargs,
105
+ ):
106
+ super().__init__(
107
+ vocab_file,
108
+ merges_file,
109
+ tokenizer_file=tokenizer_file,
110
+ unk_token=unk_token,
111
+ bos_token=bos_token,
112
+ eos_token=eos_token,
113
+ pad_token=pad_token,
114
+ add_bos_token=add_bos_token,
115
+ add_eos_token=add_eos_token,
116
+ add_prefix_space=add_prefix_space,
117
+ **kwargs,
118
+ )
119
+
120
+ self._add_bos_token = add_bos_token
121
+ self._add_eos_token = add_eos_token
122
+ self.update_post_processor()
123
+
124
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
125
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
126
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
127
+ pre_tok_state["add_prefix_space"] = add_prefix_space
128
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
129
+
130
+ self.add_prefix_space = add_prefix_space
131
+
132
+ @property
133
+ def add_eos_token(self):
134
+ return self._add_eos_token
135
+
136
+ @property
137
+ def add_bos_token(self):
138
+ return self._add_bos_token
139
+
140
+ @add_eos_token.setter
141
+ def add_eos_token(self, value):
142
+ self._add_eos_token = value
143
+ self.update_post_processor()
144
+
145
+ @add_bos_token.setter
146
+ def add_bos_token(self, value):
147
+ self._add_bos_token = value
148
+ self.update_post_processor()
149
+
150
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
151
+ def update_post_processor(self):
152
+ """
153
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
154
+ """
155
+ bos = self.bos_token
156
+ bos_token_id = self.bos_token_id
157
+ if bos is None and self.add_bos_token:
158
+ raise ValueError("add_bos_token = True but bos_token = None")
159
+
160
+ eos = self.eos_token
161
+ eos_token_id = self.eos_token_id
162
+ if eos is None and self.add_eos_token:
163
+ raise ValueError("add_eos_token = True but eos_token = None")
164
+
165
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
166
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
167
+
168
+ special_tokens = []
169
+ if self.add_bos_token:
170
+ special_tokens.append((bos, bos_token_id))
171
+ if self.add_eos_token:
172
+ special_tokens.append((eos, eos_token_id))
173
+ self._tokenizer.post_processor = processors.TemplateProcessing(
174
+ single=single, pair=pair, special_tokens=special_tokens
175
+ )
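A standalone sketch of the template strings assembled above, assuming `bos_token` and `eos_token` are both `<|endoftext|>` with id 0 (as in the GPT-NeoX-20B vocabulary) and that both `add_bos_token` and `add_eos_token` are enabled:

```python
# Sketch of the post-processor templates built by update_post_processor above.
from tokenizers.processors import TemplateProcessing

bos = eos = "<|endoftext|>"
bos_id = eos_id = 0  # assumed id; matches the GPT-NeoX-20B vocabulary

single = f"{bos}:0 $A:0 {eos}:0"
pair = f"{single} {bos}:1 $B:1 {eos}:1"

post_processor = TemplateProcessing(
    single=single,
    pair=pair,
    special_tokens=[(bos, bos_id)],  # listed once since bos == eos here
)
```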
176
+
177
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask
178
+ def get_special_tokens_mask(
179
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
180
+ ) -> List[int]:
181
+ """
182
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
183
+ special tokens using the tokenizer `prepare_for_model` method.
184
+
185
+ Args:
186
+ token_ids_0 (`List[int]`):
187
+ List of IDs.
188
+ token_ids_1 (`List[int]`, *optional*):
189
+ Optional second list of IDs for sequence pairs.
190
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
191
+ Whether or not the token list is already formatted with special tokens for the model.
192
+
193
+ Returns:
194
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
195
+ """
196
+ if already_has_special_tokens:
197
+ return super().get_special_tokens_mask(
198
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
199
+ )
200
+
201
+ bos_token_id = [1] if self.add_bos_token else []
202
+ eos_token_id = [1] if self.add_eos_token else []
203
+
204
+ if token_ids_1 is None:
205
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
206
+ return (
207
+ bos_token_id
208
+ + ([0] * len(token_ids_0))
209
+ + eos_token_id
210
+ + bos_token_id
211
+ + ([0] * len(token_ids_1))
212
+ + eos_token_id
213
+ )
214
+
215
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens
216
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
217
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
218
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
219
+
220
+ output = bos_token_id + token_ids_0 + eos_token_id
221
+
222
+ if token_ids_1 is not None:
223
+ output = output + bos_token_id + token_ids_1 + eos_token_id
224
+
225
+ return output
226
+
227
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
228
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
229
+ return tuple(files)
230
+
231
+ @property
232
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template
233
+ def default_chat_template(self):
234
+ """
235
+ A simple chat template that ignores role information and just concatenates messages with EOS tokens.
236
+ """
237
+ logger.warning_once(
238
+ "\nNo chat template is defined for this tokenizer - using the default template "
239
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
240
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
241
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
242
+ )
243
+ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
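A hedged usage sketch of the default template above, assuming the loaded checkpoint does not ship its own `chat_template`; in that case `apply_chat_template` falls back to the template returned here and simply concatenates message contents followed by the EOS token.

```python
# Sketch: rendering the default chat template (assumes no chat_template is set on the checkpoint).
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there."},
]
text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)  # expected: "Hello!<|endoftext|>Hi there.<|endoftext|>"
```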
venv/lib/python3.10/site-packages/transformers/models/layoutlmv2/__init__.py ADDED
@@ -0,0 +1,104 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ is_vision_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
28
+ "processing_layoutlmv2": ["LayoutLMv2Processor"],
29
+ "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
39
+
40
+ try:
41
+ if not is_vision_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
47
+ _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
48
+
49
+ try:
50
+ if not is_torch_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ _import_structure["modeling_layoutlmv2"] = [
56
+ "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
57
+ "LayoutLMv2ForQuestionAnswering",
58
+ "LayoutLMv2ForSequenceClassification",
59
+ "LayoutLMv2ForTokenClassification",
60
+ "LayoutLMv2Layer",
61
+ "LayoutLMv2Model",
62
+ "LayoutLMv2PreTrainedModel",
63
+ ]
64
+
65
+ if TYPE_CHECKING:
66
+ from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
67
+ from .processing_layoutlmv2 import LayoutLMv2Processor
68
+ from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
69
+
70
+ try:
71
+ if not is_tokenizers_available():
72
+ raise OptionalDependencyNotAvailable()
73
+ except OptionalDependencyNotAvailable:
74
+ pass
75
+ else:
76
+ from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
77
+
78
+ try:
79
+ if not is_vision_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
85
+
86
+ try:
87
+ if not is_torch_available():
88
+ raise OptionalDependencyNotAvailable()
89
+ except OptionalDependencyNotAvailable:
90
+ pass
91
+ else:
92
+ from .modeling_layoutlmv2 import (
93
+ LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
94
+ LayoutLMv2ForQuestionAnswering,
95
+ LayoutLMv2ForSequenceClassification,
96
+ LayoutLMv2ForTokenClassification,
97
+ LayoutLMv2Layer,
98
+ LayoutLMv2Model,
99
+ LayoutLMv2PreTrainedModel,
100
+ )
101
+ else:
102
+ import sys
103
+
104
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
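A short sketch of what the `_LazyModule` indirection above buys: submodules are imported only when one of the exported names is first accessed, and optional-dependency names resolve only if their backend is installed (torch, tokenizers, or vision in this file).

```python
# Sketch (assumes transformers and torch are installed): accessing an exported name
# triggers the lazy import of the corresponding submodule.
import transformers

config = transformers.LayoutLMv2Config()
print(type(config).__module__)  # transformers.models.layoutlmv2.configuration_layoutlmv2

# Torch-guarded names such as LayoutLMv2Model are only importable when torch is available.
from transformers import LayoutLMv2Model
```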