applied-ai-018 committed
Commit 90f1cff · verified · 1 Parent(s): 0da963b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  3. lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +28 -0
  4. lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json +810 -0
  5. lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json +1 -0
  6. venv/lib/python3.10/site-packages/transformers/models/altclip/__init__.py +71 -0
  7. venv/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py +402 -0
  8. venv/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py +1693 -0
  9. venv/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py +131 -0
  10. venv/lib/python3.10/site-packages/transformers/models/cpm/__init__.py +59 -0
  11. venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py +344 -0
  15. venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py +237 -0
  16. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py +75 -0
  17. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py +277 -0
  25. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py +237 -0
  26. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py +43 -0
  27. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py +1553 -0
  28. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py +49 -0
  29. venv/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/jamba/__init__.py +58 -0
  31. venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py +223 -0
  35. venv/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py +1882 -0
  36. venv/lib/python3.10/site-packages/transformers/models/maskformer/__init__.py +86 -0
  37. venv/lib/python3.10/site-packages/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py +730 -0
  38. venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer_swin.py +912 -0
  39. venv/lib/python3.10/site-packages/transformers/models/mega/__init__.py +70 -0
  40. venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py +242 -0
  45. venv/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py +291 -0
  46. venv/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py +73 -0
  48. venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc +0 -0
ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:deac2a55fcd86f5cc694c603b449726cab93ec94aed43ae043638f87f246f947
+ size 33555612
ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d183029d9400f181c59684ec18be25a2b52abdeeae1b1fcb089b165c71c7d80
+ size 33555627
lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log ADDED
@@ -0,0 +1,28 @@
+
+ 2024-05-14:16:34:29,286 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-14:16:34:33,982 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+ 2024-05-14:16:34:33,984 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-14:16:34:33,984 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'}
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+ return func(*args, **kwargs)
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
+ [2024-05-14 16:34:41,205] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+ You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
+ 2024-05-14:16:34:41,615 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean
+ 2024-05-14:16:34:41,616 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+ warnings.warn(
+ 2024-05-14:16:34:42,901 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-05-14:16:34:42,901 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-05-14:16:34:42,920 INFO [task.py:395] Building contexts for indiccopa-hi on rank 3...
+ Passed argument batch_size = auto:1. Detecting largest batch size
+ 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 107991.28it/s]
+ 2024-05-14:16:34:44,718 INFO [evaluator.py:379] Running loglikelihood requests
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
+ Determined largest batch size: 64
lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json ADDED
@@ -0,0 +1,810 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-14T16:34:29.154200",
5
+ "startedAt": "2024-05-14T16:34:28.708355",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100",
13
+ "--tasks",
14
+ "indiccopa-hi",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/data/cronscript/lm-evaluation-harness",
29
+ "host": "vizzhy-150-3",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 76,
33
+ "cpu_count_logical": 152,
34
+ "cpu_freq": {
35
+ "current": 3396.061605263158,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3300.0,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3300.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 3300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 3400.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 3280.656,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 3400.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 3400.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 3284.865,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 3400.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 3223.887,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 3400.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 3400.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 3244.473,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 3400.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 3400.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 3400.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 3400.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 3400.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 3400.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 3400.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 3400.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 3400.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 3400.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 3400.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 3400.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 3400.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 3400.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 3400.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 3400.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 3400.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 3400.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 3400.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 3291.536,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 3400.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 3206.218,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 3400.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 3400.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 3400.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 3400.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 3400.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 3400.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 3400.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 3400.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 3400.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 3400.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 3400.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 3400.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 3400.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 3400.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 3400.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 3400.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 3400.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 3400.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 3400.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 3400.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 3400.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 3400.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 3400.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 3400.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 3400.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 3400.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 3400.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 3400.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 3400.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 3400.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 3400.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 3400.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 3400.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 3400.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 3400.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 3400.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 3400.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 3400.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 3400.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 3400.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 3400.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 3400.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 3400.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 3400.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 3400.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 3400.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 3400.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 3400.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 3400.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 3400.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 3400.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 3400.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 3400.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 3400.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 3400.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 3400.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 3400.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 3400.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 3220.102,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 3400.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 3400.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 3400.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 3400.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 3400.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 3300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 3400.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 3400.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 3400.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 3400.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 3400.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 3400.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 3400.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 3400.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 3400.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 3400.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 3400.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 3400.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 3400.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 3400.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 3400.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 3400.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 3400.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 3400.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 3400.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 3400.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 3400.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 3400.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 3400.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 3400.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 3400.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 3400.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 3400.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 3400.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 3400.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 3400.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 3400.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 3400.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 3400.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 3400.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 3400.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 3400.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 3400.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 3400.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 3400.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 3400.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 3400.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 3400.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 3400.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 3400.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 3400.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 3400.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 3400.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 3400.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 3400.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 3400.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ }
800
+ ],
801
+ "disk": {
802
+ "/": {
803
+ "total": 866.4415092468262,
804
+ "used": 863.5445137023926
805
+ }
806
+ },
807
+ "memory": {
808
+ "total": 1007.5000267028809
809
+ }
810
+ }
lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 23}}
venv/lib/python3.10/site-packages/transformers/models/altclip/__init__.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+ _import_structure = {
+     "configuration_altclip": [
+         "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "AltCLIPConfig",
+         "AltCLIPTextConfig",
+         "AltCLIPVisionConfig",
+     ],
+     "processing_altclip": ["AltCLIPProcessor"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_altclip"] = [
+         "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "AltCLIPPreTrainedModel",
+         "AltCLIPModel",
+         "AltCLIPTextModel",
+         "AltCLIPVisionModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_altclip import (
+         ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         AltCLIPConfig,
+         AltCLIPTextConfig,
+         AltCLIPVisionConfig,
+     )
+     from .processing_altclip import AltCLIPProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_altclip import (
+             ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+             AltCLIPModel,
+             AltCLIPPreTrainedModel,
+             AltCLIPTextModel,
+             AltCLIPVisionModel,
+         )
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py ADDED
@@ -0,0 +1,402 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AltCLIP model configuration"""
16
+ import os
17
+ from typing import Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class AltCLIPTextConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a
32
+ AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the AltCLIP
34
+ [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 250002):
42
+ Vocabulary size of the AltCLIP model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`AltCLIPTextModel`].
44
+ hidden_size (`int`, *optional*, defaults to 1024):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 24):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 16):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ intermediate_size (`int`, *optional*, defaults to 4096):
51
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
52
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
55
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
57
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout ratio for the attention probabilities.
59
+ max_position_embeddings (`int`, *optional*, defaults to 514):
60
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
61
+ just in case (e.g., 512 or 1024 or 2048).
62
+ type_vocab_size (`int`, *optional*, defaults to 1):
63
+ The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`]
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ initializer_factor (`float`, *optional*, defaults to 0.02):
67
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
68
+ testing).
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
70
+ The epsilon used by the layer normalization layers.
71
+ pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token.
72
+ bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token.
73
+ eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
74
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
75
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
76
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
77
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
78
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
79
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
80
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
81
+ use_cache (`bool`, *optional*, defaults to `True`):
82
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
83
+ relevant if `config.is_decoder=True`.
84
+ project_dim (`int`, *optional*, defaults to 768):
85
+ The dimentions of the teacher model before the mapping layer.
86
+
87
+ Examples:
88
+
89
+ ```python
90
+ >>> from transformers import AltCLIPTextModel, AltCLIPTextConfig
91
+
92
+ >>> # Initializing a AltCLIPTextConfig with BAAI/AltCLIP style configuration
93
+ >>> configuration = AltCLIPTextConfig()
94
+
95
+ >>> # Initializing a AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration
96
+ >>> model = AltCLIPTextModel(configuration)
97
+
98
+ >>> # Accessing the model configuration
99
+ >>> configuration = model.config
100
+ ```"""
101
+
102
+ model_type = "altclip_text_model"
103
+
104
+ def __init__(
105
+ self,
106
+ vocab_size=250002,
107
+ hidden_size=1024,
108
+ num_hidden_layers=24,
109
+ num_attention_heads=16,
110
+ intermediate_size=4096,
111
+ hidden_act="gelu",
112
+ hidden_dropout_prob=0.1,
113
+ attention_probs_dropout_prob=0.1,
114
+ max_position_embeddings=514,
115
+ type_vocab_size=1,
116
+ initializer_range=0.02,
117
+ initializer_factor=0.02,
118
+ layer_norm_eps=1e-05,
119
+ pad_token_id=1,
120
+ bos_token_id=0,
121
+ eos_token_id=2,
122
+ position_embedding_type="absolute",
123
+ use_cache=True,
124
+ project_dim=768,
125
+ **kwargs,
126
+ ):
127
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
128
+
129
+ self.vocab_size = vocab_size
130
+ self.hidden_size = hidden_size
131
+ self.num_hidden_layers = num_hidden_layers
132
+ self.num_attention_heads = num_attention_heads
133
+ self.hidden_act = hidden_act
134
+ self.intermediate_size = intermediate_size
135
+ self.hidden_dropout_prob = hidden_dropout_prob
136
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.type_vocab_size = type_vocab_size
139
+ self.initializer_range = initializer_range
140
+ self.initializer_factor = initializer_factor
141
+ self.layer_norm_eps = layer_norm_eps
142
+ self.position_embedding_type = position_embedding_type
143
+ self.use_cache = use_cache
144
+ self.project_dim = project_dim
145
+
146
+
147
+ class AltCLIPVisionConfig(PretrainedConfig):
148
+ r"""
149
+ This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
150
+ AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
151
+ with the defaults will yield a similar configuration to that of the AltCLIP
152
+ [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
153
+
154
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
155
+ documentation from [`PretrainedConfig`] for more information.
156
+
157
+
158
+ Args:
159
+ hidden_size (`int`, *optional*, defaults to 768):
160
+ Dimensionality of the encoder layers and the pooler layer.
161
+ intermediate_size (`int`, *optional*, defaults to 3072):
162
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
163
+ projection_dim (`int`, *optional*, defaults to 512):
164
+ Dimentionality of text and vision projection layers.
165
+ num_hidden_layers (`int`, *optional*, defaults to 12):
166
+ Number of hidden layers in the Transformer encoder.
167
+ num_attention_heads (`int`, *optional*, defaults to 12):
168
+ Number of attention heads for each attention layer in the Transformer encoder.
169
+ num_channels (`int`, *optional*, defaults to 3):
170
+ The number of input channels.
171
+ image_size (`int`, *optional*, defaults to 224):
172
+ The size (resolution) of each image.
173
+ patch_size (`int`, *optional*, defaults to 32):
174
+ The size (resolution) of each patch.
175
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
176
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
177
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
178
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
179
+ The epsilon used by the layer normalization layers.
180
+ attention_dropout (`float`, *optional*, defaults to 0.0):
181
+ The dropout ratio for the attention probabilities.
182
+ initializer_range (`float`, *optional*, defaults to 0.02):
183
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
184
+ initializer_factor (`float`, *optional*, defaults to 1.0):
185
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
186
+ testing).
187
+
188
+ Example:
189
+
190
+ ```python
191
+ >>> from transformers import AltCLIPVisionConfig, AltCLIPVisionModel
192
+
193
+ >>> # Initializing a AltCLIPVisionConfig with BAAI/AltCLIP style configuration
194
+ >>> configuration = AltCLIPVisionConfig()
195
+
196
+ >>> # Initializing a AltCLIPVisionModel (with random weights) from the BAAI/AltCLIP style configuration
197
+ >>> model = AltCLIPVisionModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```"""
202
+
203
+ model_type = "altclip_vision_model"
204
+
205
+ def __init__(
206
+ self,
207
+ hidden_size=768,
208
+ intermediate_size=3072,
209
+ projection_dim=512,
210
+ num_hidden_layers=12,
211
+ num_attention_heads=12,
212
+ num_channels=3,
213
+ image_size=224,
214
+ patch_size=32,
215
+ hidden_act="quick_gelu",
216
+ layer_norm_eps=1e-5,
217
+ attention_dropout=0.0,
218
+ initializer_range=0.02,
219
+ initializer_factor=1.0,
220
+ **kwargs,
221
+ ):
222
+ super().__init__(**kwargs)
223
+
224
+ self.hidden_size = hidden_size
225
+ self.intermediate_size = intermediate_size
226
+ self.projection_dim = projection_dim
227
+ self.num_hidden_layers = num_hidden_layers
228
+ self.num_attention_heads = num_attention_heads
229
+ self.num_channels = num_channels
230
+ self.patch_size = patch_size
231
+ self.image_size = image_size
232
+ self.initializer_range = initializer_range
233
+ self.initializer_factor = initializer_factor
234
+ self.attention_dropout = attention_dropout
235
+ self.layer_norm_eps = layer_norm_eps
236
+ self.hidden_act = hidden_act
237
+
238
+ @classmethod
239
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
240
+ cls._set_token_in_kwargs(kwargs)
241
+
242
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
243
+
244
+ # get the vision config dict if we are loading from AltCLIPConfig
245
+ if config_dict.get("model_type") == "altclip":
246
+ config_dict = config_dict["vision_config"]
247
+
248
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
249
+ logger.warning(
250
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
251
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
252
+ )
253
+
254
+ return cls.from_dict(config_dict, **kwargs)
255
+
256
+
257
+ class AltCLIPConfig(PretrainedConfig):
258
+ r"""
259
+ This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
260
+ AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
261
+ with the defaults will yield a similar configuration to that of the AltCLIP
262
+ [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
263
+
264
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
265
+ documentation from [`PretrainedConfig`] for more information.
266
+
267
+ Args:
268
+ text_config (`dict`, *optional*):
269
+ Dictionary of configuration options used to initialize [`AltCLIPTextConfig`].
270
+ vision_config (`dict`, *optional*):
271
+ Dictionary of configuration options used to initialize [`AltCLIPVisionConfig`].
272
+ projection_dim (`int`, *optional*, defaults to 768):
273
+ Dimentionality of text and vision projection layers.
274
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
275
+ The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.
276
+ kwargs (*optional*):
277
+ Dictionary of keyword arguments.
278
+
279
+ Example:
280
+
281
+ ```python
282
+ >>> from transformers import AltCLIPConfig, AltCLIPModel
283
+
284
+ >>> # Initializing a AltCLIPConfig with BAAI/AltCLIP style configuration
285
+ >>> configuration = AltCLIPConfig()
286
+
287
+ >>> # Initializing a AltCLIPModel (with random weights) from the BAAI/AltCLIP style configuration
288
+ >>> model = AltCLIPModel(configuration)
289
+
290
+ >>> # Accessing the model configuration
291
+ >>> configuration = model.config
292
+
293
+ >>> # We can also initialize a AltCLIPConfig from a AltCLIPTextConfig and a AltCLIPVisionConfig
294
+
295
+ >>> # Initializing a AltCLIPText and AltCLIPVision configuration
296
+ >>> config_text = AltCLIPTextConfig()
297
+ >>> config_vision = AltCLIPVisionConfig()
298
+
299
+ >>> config = AltCLIPConfig.from_text_vision_configs(config_text, config_vision)
300
+ ```"""
301
+
302
+ model_type = "altclip"
303
+
304
+ def __init__(
305
+ self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
306
+ ):
307
+ # If `_config_dict` exist, we use them for the backward compatibility.
308
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
309
+ # of confusion!).
310
+ text_config_dict = kwargs.pop("text_config_dict", None)
311
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
312
+
313
+ super().__init__(**kwargs)
314
+
315
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
316
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
317
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
318
+ if text_config_dict is not None:
319
+ if text_config is None:
320
+ text_config = {}
321
+
322
+ # This is the complete result when using `text_config_dict`.
323
+ _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
324
+
325
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
326
+ for key, value in _text_config_dict.items():
327
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
328
+ # If specified in `text_config_dict`
329
+ if key in text_config_dict:
330
+ message = (
331
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
332
+ f'The value `text_config_dict["{key}"]` will be used instead.'
333
+ )
334
+ # If inferred from default argument values (just to be super careful)
335
+ else:
336
+ message = (
337
+ f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
338
+ f'value `text_config["{key}"]` will be overriden.'
339
+ )
340
+ logger.info(message)
341
+
342
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
343
+ text_config.update(_text_config_dict)
344
+
345
+ if vision_config_dict is not None:
346
+ if vision_config is None:
347
+ vision_config = {}
348
+
349
+ # This is the complete result when using `vision_config_dict`.
350
+ _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
351
+ # convert keys to string instead of integer
352
+ if "id2label" in _vision_config_dict:
353
+ _vision_config_dict["id2label"] = {
354
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
355
+ }
356
+
357
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
358
+ for key, value in _vision_config_dict.items():
359
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
360
+ # If specified in `vision_config_dict`
361
+ if key in vision_config_dict:
362
+ message = (
363
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
364
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
365
+ )
366
+ # If inferred from default argument values (just to be super careful)
367
+ else:
368
+ message = (
369
+ f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
370
+ f'The value `vision_config["{key}"]` will be overriden.'
371
+ )
372
+ logger.info(message)
373
+
374
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
375
+ vision_config.update(_vision_config_dict)
376
+
377
+ if text_config is None:
378
+ text_config = {}
379
+ logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
380
+
381
+ if vision_config is None:
382
+ vision_config = {}
383
+ logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
384
+
385
+ self.text_config = AltCLIPTextConfig(**text_config)
386
+ self.vision_config = AltCLIPVisionConfig(**vision_config)
387
+
388
+ self.projection_dim = projection_dim
389
+ self.logit_scale_init_value = logit_scale_init_value
390
+ self.initializer_factor = 1.0
391
+
392
+ @classmethod
393
+ def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
394
+ r"""
395
+ Instantiate a [`AltCLIPConfig`] (or a derived class) from altclip text model configuration and altclip vision
396
+ model configuration.
397
+
398
+ Returns:
399
+ [`AltCLIPConfig`]: An instance of a configuration object
400
+ """
401
+
402
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
venv/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py ADDED
@@ -0,0 +1,1693 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The BAAI Teams Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch AltCLIP model."""
16
+ import math
17
+ from dataclasses import dataclass
18
+ from typing import Any, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.utils.checkpoint
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutput,
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPooling,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndProjection,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
35
+ from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CHECKPOINT_FOR_DOC = "BAAI/AltCLIP"
41
+ _CONFIG_FOR_DOC = "AltCLIPConfig"
42
+
43
+
44
+ from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
45
+
46
+
47
+ ALTCLIP_START_DOCSTRING = r"""
48
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
50
+ etc.)
51
+
52
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
53
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
54
+ and behavior.
55
+
56
+ Parameters:
57
+ config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
58
+ Initializing with a config file does not load the weights associated with the model, only the
59
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
60
+ """
61
+
62
+ ALTCLIP_TEXT_INPUTS_DOCSTRING = r"""
63
+ Args:
64
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
65
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
66
+ it.
67
+
68
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
69
+ [`PreTrainedTokenizer.__call__`] for details.
70
+
71
+ [What are input IDs?](../glossary#input-ids)
72
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
73
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
74
+
75
+ - 1 for tokens that are **not masked**,
76
+ - 0 for tokens that are **masked**.
77
+
78
+ [What are attention masks?](../glossary#attention-mask)
79
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
80
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
81
+ config.max_position_embeddings - 1]`.
82
+
83
+ [What are position IDs?](../glossary#position-ids)
84
+ output_attentions (`bool`, *optional*):
85
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
86
+ tensors for more detail.
87
+ output_hidden_states (`bool`, *optional*):
88
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
89
+ more detail.
90
+ return_dict (`bool`, *optional*):
91
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
92
+ """
93
+
94
+ ALTCLIP_VISION_INPUTS_DOCSTRING = r"""
95
+ Args:
96
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
97
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
98
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
99
+ output_attentions (`bool`, *optional*):
100
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
101
+ tensors for more detail.
102
+ output_hidden_states (`bool`, *optional*):
103
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
104
+ more detail.
105
+ return_dict (`bool`, *optional*):
106
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
107
+ """
108
+
109
+ ALTCLIP_INPUTS_DOCSTRING = r"""
110
+ Args:
111
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
112
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
113
+ it.
114
+
115
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
116
+ [`PreTrainedTokenizer.__call__`] for details.
117
+
118
+ [What are input IDs?](../glossary#input-ids)
119
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
120
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
121
+
122
+ - 1 for tokens that are **not masked**,
123
+ - 0 for tokens that are **masked**.
124
+
125
+ [What are attention masks?](../glossary#attention-mask)
126
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
127
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
128
+ config.max_position_embeddings - 1]`.
129
+
130
+ [What are position IDs?](../glossary#position-ids)
131
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
132
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
133
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
134
+ return_loss (`bool`, *optional*):
135
+ Whether or not to return the contrastive loss.
136
+ output_attentions (`bool`, *optional*):
137
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
138
+ tensors for more detail.
139
+ output_hidden_states (`bool`, *optional*):
140
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
141
+ more detail.
142
+ return_dict (`bool`, *optional*):
143
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
144
+ """
145
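+ # Hedged usage sketch for the combined inputs documented above (illustration only; the checkpoint and
+ # processor follow the examples used later in this file):
+ #
+ #     # from transformers import AltCLIPModel, AutoProcessor
+ #     # model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
+ #     # processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
+ #     # inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
+ #     # outputs = model(**inputs, return_loss=True)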
+
146
+
147
+ # contrastive loss function, adapted from
148
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
149
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
150
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
151
+
152
+
153
+ def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
154
+ caption_loss = contrastive_loss(similarity)
155
+ image_loss = contrastive_loss(similarity.t())
156
+ return (caption_loss + image_loss) / 2.0
157
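+ # Illustrative sketch (not part of the upstream code): for a batch of N matched image-text pairs,
+ # `similarity` is the N x N logit matrix and the implicit targets are the diagonal indices 0..N-1, e.g.:
+ #
+ #     # import torch
+ #     # image_embeds = torch.nn.functional.normalize(torch.randn(4, 512), dim=-1)
+ #     # text_embeds = torch.nn.functional.normalize(torch.randn(4, 512), dim=-1)
+ #     # logits_per_text = 100.0 * text_embeds @ image_embeds.t()  # the scale of 100.0 is an assumption
+ #     # loss = clip_loss(logits_per_text)  # average of text->image and image->text cross-entropy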
+
158
+
159
+ @dataclass
160
+ # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
161
+ class AltCLIPOutput(ModelOutput):
162
+ """
163
+ Args:
164
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
165
+ Contrastive loss for image-text similarity.
166
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
167
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
168
+ similarity scores.
169
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
170
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
171
+ similarity scores.
172
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
173
+ The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`].
174
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
175
+ The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
176
+ text_model_output (`BaseModelOutputWithPooling`):
177
+ The output of the [`AltCLIPTextModel`].
178
+ vision_model_output (`BaseModelOutputWithPooling`):
179
+ The output of the [`AltCLIPVisionModel`].
180
+ """
181
+
182
+ loss: Optional[torch.FloatTensor] = None
183
+ logits_per_image: torch.FloatTensor = None
184
+ logits_per_text: torch.FloatTensor = None
185
+ text_embeds: torch.FloatTensor = None
186
+ image_embeds: torch.FloatTensor = None
187
+ text_model_output: BaseModelOutputWithPooling = None
188
+ vision_model_output: BaseModelOutputWithPooling = None
189
+
190
+ def to_tuple(self) -> Tuple[Any]:
191
+ return tuple(
192
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
193
+ for k in self.keys()
194
+ )
195
+
196
+
197
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->AltRoberta
198
+ class AltRobertaEmbeddings(nn.Module):
199
+ """
200
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
201
+ """
202
+
203
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
204
+ def __init__(self, config):
205
+ super().__init__()
206
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
207
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
208
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
209
+
210
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
211
+ # any TensorFlow checkpoint file
212
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
213
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
214
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
215
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
216
+ self.register_buffer(
217
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
218
+ )
219
+ self.register_buffer(
220
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
221
+ )
222
+
223
+ # End copy
224
+ self.padding_idx = config.pad_token_id
225
+ self.position_embeddings = nn.Embedding(
226
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
227
+ )
228
+
229
+ def forward(
230
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
231
+ ):
232
+ if position_ids is None:
233
+ if input_ids is not None:
234
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
235
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
236
+ else:
237
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
238
+
239
+ if input_ids is not None:
240
+ input_shape = input_ids.size()
241
+ else:
242
+ input_shape = inputs_embeds.size()[:-1]
243
+
244
+ seq_length = input_shape[1]
245
+
246
+ # Set token_type_ids to the registered buffer (all zeros) defined in the constructor. This usually occurs
248
+ # when token_type_ids are auto-generated; the registered buffer helps users trace the model without passing
249
+ # token_type_ids and solves issue #5664
249
+ if token_type_ids is None:
250
+ if hasattr(self, "token_type_ids"):
251
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
252
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
253
+ token_type_ids = buffered_token_type_ids_expanded
254
+ else:
255
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
256
+
257
+ if inputs_embeds is None:
258
+ inputs_embeds = self.word_embeddings(input_ids)
259
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
260
+
261
+ embeddings = inputs_embeds + token_type_embeddings
262
+ if self.position_embedding_type == "absolute":
263
+ position_embeddings = self.position_embeddings(position_ids)
264
+ embeddings += position_embeddings
265
+ embeddings = self.LayerNorm(embeddings)
266
+ embeddings = self.dropout(embeddings)
267
+ return embeddings
268
+
269
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
270
+ """
271
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
272
+
273
+ Args:
274
+ inputs_embeds: torch.Tensor
275
+
276
+ Returns: torch.Tensor
277
+ """
278
+ input_shape = inputs_embeds.size()[:-1]
279
+ sequence_length = input_shape[1]
280
+
281
+ position_ids = torch.arange(
282
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
283
+ )
284
+ return position_ids.unsqueeze(0).expand(input_shape)
285
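+ # Worked example (illustration, not part of the upstream code): with padding_idx = 1,
+ # create_position_ids_from_input_ids maps input_ids [[0, 5, 6, 1, 1]] to position ids [[2, 3, 4, 1, 1]]:
+ # real tokens count upward from padding_idx + 1 while padded positions keep padding_idx, which is the
+ # "tiny tweak" relative to BertEmbeddings mentioned in the class docstring above.
+ #
+ #     # ids = torch.tensor([[0, 5, 6, 1, 1]])
+ #     # create_position_ids_from_input_ids(ids, padding_idx=1)  # -> tensor([[2, 3, 4, 1, 1]])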
+
286
+
287
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->AltRoberta
288
+ class AltRobertaSelfAttention(nn.Module):
289
+ def __init__(self, config, position_embedding_type=None):
290
+ super().__init__()
291
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
292
+ raise ValueError(
293
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
294
+ f"heads ({config.num_attention_heads})"
295
+ )
296
+
297
+ self.num_attention_heads = config.num_attention_heads
298
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
299
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
300
+
301
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
302
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
303
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
304
+
305
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
306
+ self.position_embedding_type = position_embedding_type or getattr(
307
+ config, "position_embedding_type", "absolute"
308
+ )
309
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
310
+ self.max_position_embeddings = config.max_position_embeddings
311
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
312
+
313
+ self.is_decoder = config.is_decoder
314
+
315
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
316
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
317
+ x = x.view(new_x_shape)
318
+ return x.permute(0, 2, 1, 3)
319
+
320
+ def forward(
321
+ self,
322
+ hidden_states: torch.Tensor,
323
+ attention_mask: Optional[torch.FloatTensor] = None,
324
+ head_mask: Optional[torch.FloatTensor] = None,
325
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
326
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
327
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
328
+ output_attentions: Optional[bool] = False,
329
+ ) -> Tuple[torch.Tensor]:
330
+ mixed_query_layer = self.query(hidden_states)
331
+
332
+ # If this is instantiated as a cross-attention module, the keys
333
+ # and values come from an encoder; the attention mask needs to be
334
+ # such that the encoder's padding tokens are not attended to.
335
+ is_cross_attention = encoder_hidden_states is not None
336
+
337
+ if is_cross_attention and past_key_value is not None:
338
+ # reuse k,v, cross_attentions
339
+ key_layer = past_key_value[0]
340
+ value_layer = past_key_value[1]
341
+ attention_mask = encoder_attention_mask
342
+ elif is_cross_attention:
343
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
344
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
345
+ attention_mask = encoder_attention_mask
346
+ elif past_key_value is not None:
347
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
348
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
349
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
350
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
351
+ else:
352
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
353
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
354
+
355
+ query_layer = self.transpose_for_scores(mixed_query_layer)
356
+
357
+ use_cache = past_key_value is not None
358
+ if self.is_decoder:
359
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
360
+ # Further calls to cross_attention layer can then reuse all cross-attention
361
+ # key/value_states (first "if" case)
362
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
363
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
364
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
365
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
366
+ past_key_value = (key_layer, value_layer)
367
+
368
+ # Take the dot product between "query" and "key" to get the raw attention scores.
369
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
370
+
371
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
372
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
373
+ if use_cache:
374
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
375
+ -1, 1
376
+ )
377
+ else:
378
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
379
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
380
+ distance = position_ids_l - position_ids_r
381
+
382
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
383
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
384
+
385
+ if self.position_embedding_type == "relative_key":
386
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
387
+ attention_scores = attention_scores + relative_position_scores
388
+ elif self.position_embedding_type == "relative_key_query":
389
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
390
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
391
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
392
+
393
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
394
+ if attention_mask is not None:
395
+ # Apply the attention mask (precomputed for all layers in the AltRobertaModel forward() function)
396
+ attention_scores = attention_scores + attention_mask
397
+
398
+ # Normalize the attention scores to probabilities.
399
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
400
+
401
+ # This is actually dropping out entire tokens to attend to, which might
402
+ # seem a bit unusual, but is taken from the original Transformer paper.
403
+ attention_probs = self.dropout(attention_probs)
404
+
405
+ # Mask heads if we want to
406
+ if head_mask is not None:
407
+ attention_probs = attention_probs * head_mask
408
+
409
+ context_layer = torch.matmul(attention_probs, value_layer)
410
+
411
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
412
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
413
+ context_layer = context_layer.view(new_context_layer_shape)
414
+
415
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
416
+
417
+ if self.is_decoder:
418
+ outputs = outputs + (past_key_value,)
419
+ return outputs
420
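+ # Shape sketch (illustration): transpose_for_scores reshapes (batch, seq_len, all_head_size) into
+ # (batch, num_heads, seq_len, head_size), so the matmul of queries and transposed keys yields per-head
+ # scores of shape (batch, num_heads, seq_len, seq_len), scaled by sqrt(head_size) and softmaxed over keys:
+ #
+ #     # scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_size)  # (B, H, L, L)
+ #     # probs = scores.softmax(dim=-1)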
+
421
+
422
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput
423
+ class AltRobertaSelfOutput(nn.Module):
424
+ def __init__(self, config):
425
+ super().__init__()
426
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
427
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
428
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
429
+
430
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
431
+ hidden_states = self.dense(hidden_states)
432
+ hidden_states = self.dropout(hidden_states)
433
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
434
+ return hidden_states
435
+
436
+
437
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->AltRoberta
438
+ class AltRobertaAttention(nn.Module):
439
+ def __init__(self, config, position_embedding_type=None):
440
+ super().__init__()
441
+ self.self = AltRobertaSelfAttention(config, position_embedding_type=position_embedding_type)
442
+ self.output = AltRobertaSelfOutput(config)
443
+ self.pruned_heads = set()
444
+
445
+ def prune_heads(self, heads):
446
+ if len(heads) == 0:
447
+ return
448
+ heads, index = find_pruneable_heads_and_indices(
449
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
450
+ )
451
+
452
+ # Prune linear layers
453
+ self.self.query = prune_linear_layer(self.self.query, index)
454
+ self.self.key = prune_linear_layer(self.self.key, index)
455
+ self.self.value = prune_linear_layer(self.self.value, index)
456
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
457
+
458
+ # Update hyper params and store pruned heads
459
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
460
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
461
+ self.pruned_heads = self.pruned_heads.union(heads)
462
+
463
+ def forward(
464
+ self,
465
+ hidden_states: torch.Tensor,
466
+ attention_mask: Optional[torch.FloatTensor] = None,
467
+ head_mask: Optional[torch.FloatTensor] = None,
468
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
469
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
470
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
471
+ output_attentions: Optional[bool] = False,
472
+ ) -> Tuple[torch.Tensor]:
473
+ self_outputs = self.self(
474
+ hidden_states,
475
+ attention_mask,
476
+ head_mask,
477
+ encoder_hidden_states,
478
+ encoder_attention_mask,
479
+ past_key_value,
480
+ output_attentions,
481
+ )
482
+ attention_output = self.output(self_outputs[0], hidden_states)
483
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
484
+ return outputs
485
+
486
+
487
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate with Roberta->AltRoberta
488
+ class AltRobertaIntermediate(nn.Module):
489
+ def __init__(self, config):
490
+ super().__init__()
491
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
492
+ if isinstance(config.hidden_act, str):
493
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
494
+ else:
495
+ self.intermediate_act_fn = config.hidden_act
496
+
497
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
498
+ hidden_states = self.dense(hidden_states)
499
+ hidden_states = self.intermediate_act_fn(hidden_states)
500
+ return hidden_states
501
+
502
+
503
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaOutput
504
+ class AltRobertaOutput(nn.Module):
505
+ def __init__(self, config):
506
+ super().__init__()
507
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
508
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
509
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
510
+
511
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
512
+ hidden_states = self.dense(hidden_states)
513
+ hidden_states = self.dropout(hidden_states)
514
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
515
+ return hidden_states
516
+
517
+
518
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->AltRoberta
519
+ class AltRobertaLayer(nn.Module):
520
+ def __init__(self, config):
521
+ super().__init__()
522
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
523
+ self.seq_len_dim = 1
524
+ self.attention = AltRobertaAttention(config)
525
+ self.is_decoder = config.is_decoder
526
+ self.add_cross_attention = config.add_cross_attention
527
+ if self.add_cross_attention:
528
+ if not self.is_decoder:
529
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
530
+ self.crossattention = AltRobertaAttention(config, position_embedding_type="absolute")
531
+ self.intermediate = AltRobertaIntermediate(config)
532
+ self.output = AltRobertaOutput(config)
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states: torch.Tensor,
537
+ attention_mask: Optional[torch.FloatTensor] = None,
538
+ head_mask: Optional[torch.FloatTensor] = None,
539
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
540
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
541
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
542
+ output_attentions: Optional[bool] = False,
543
+ ) -> Tuple[torch.Tensor]:
544
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
545
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
546
+ self_attention_outputs = self.attention(
547
+ hidden_states,
548
+ attention_mask,
549
+ head_mask,
550
+ output_attentions=output_attentions,
551
+ past_key_value=self_attn_past_key_value,
552
+ )
553
+ attention_output = self_attention_outputs[0]
554
+
555
+ # if decoder, the last output is tuple of self-attn cache
556
+ if self.is_decoder:
557
+ outputs = self_attention_outputs[1:-1]
558
+ present_key_value = self_attention_outputs[-1]
559
+ else:
560
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
561
+
562
+ cross_attn_present_key_value = None
563
+ if self.is_decoder and encoder_hidden_states is not None:
564
+ if not hasattr(self, "crossattention"):
565
+ raise ValueError(
566
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
567
+ " by setting `config.add_cross_attention=True`"
568
+ )
569
+
570
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
571
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
572
+ cross_attention_outputs = self.crossattention(
573
+ attention_output,
574
+ attention_mask,
575
+ head_mask,
576
+ encoder_hidden_states,
577
+ encoder_attention_mask,
578
+ cross_attn_past_key_value,
579
+ output_attentions,
580
+ )
581
+ attention_output = cross_attention_outputs[0]
582
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
583
+
584
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
585
+ cross_attn_present_key_value = cross_attention_outputs[-1]
586
+ present_key_value = present_key_value + cross_attn_present_key_value
587
+
588
+ layer_output = apply_chunking_to_forward(
589
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
590
+ )
591
+ outputs = (layer_output,) + outputs
592
+
593
+ # if decoder, return the attn key/values as the last output
594
+ if self.is_decoder:
595
+ outputs = outputs + (present_key_value,)
596
+
597
+ return outputs
598
+
599
+ def feed_forward_chunk(self, attention_output):
600
+ intermediate_output = self.intermediate(attention_output)
601
+ layer_output = self.output(intermediate_output, attention_output)
602
+ return layer_output
603
+
604
+
605
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->AltRoberta
606
+ class AltRobertaEncoder(nn.Module):
607
+ def __init__(self, config):
608
+ super().__init__()
609
+ self.config = config
610
+ self.layer = nn.ModuleList([AltRobertaLayer(config) for _ in range(config.num_hidden_layers)])
611
+ self.gradient_checkpointing = False
612
+
613
+ def forward(
614
+ self,
615
+ hidden_states: torch.Tensor,
616
+ attention_mask: Optional[torch.FloatTensor] = None,
617
+ head_mask: Optional[torch.FloatTensor] = None,
618
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
619
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
620
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
621
+ use_cache: Optional[bool] = None,
622
+ output_attentions: Optional[bool] = False,
623
+ output_hidden_states: Optional[bool] = False,
624
+ return_dict: Optional[bool] = True,
625
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
626
+ all_hidden_states = () if output_hidden_states else None
627
+ all_self_attentions = () if output_attentions else None
628
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
629
+
630
+ if self.gradient_checkpointing and self.training:
631
+ if use_cache:
632
+ logger.warning_once(
633
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
634
+ )
635
+ use_cache = False
636
+
637
+ next_decoder_cache = () if use_cache else None
638
+ for i, layer_module in enumerate(self.layer):
639
+ if output_hidden_states:
640
+ all_hidden_states = all_hidden_states + (hidden_states,)
641
+
642
+ layer_head_mask = head_mask[i] if head_mask is not None else None
643
+ past_key_value = past_key_values[i] if past_key_values is not None else None
644
+
645
+ if self.gradient_checkpointing and self.training:
646
+ layer_outputs = self._gradient_checkpointing_func(
647
+ layer_module.__call__,
648
+ hidden_states,
649
+ attention_mask,
650
+ layer_head_mask,
651
+ encoder_hidden_states,
652
+ encoder_attention_mask,
653
+ past_key_value,
654
+ output_attentions,
655
+ )
656
+ else:
657
+ layer_outputs = layer_module(
658
+ hidden_states,
659
+ attention_mask,
660
+ layer_head_mask,
661
+ encoder_hidden_states,
662
+ encoder_attention_mask,
663
+ past_key_value,
664
+ output_attentions,
665
+ )
666
+
667
+ hidden_states = layer_outputs[0]
668
+ if use_cache:
669
+ next_decoder_cache += (layer_outputs[-1],)
670
+ if output_attentions:
671
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
672
+ if self.config.add_cross_attention:
673
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
674
+
675
+ if output_hidden_states:
676
+ all_hidden_states = all_hidden_states + (hidden_states,)
677
+
678
+ if not return_dict:
679
+ return tuple(
680
+ v
681
+ for v in [
682
+ hidden_states,
683
+ next_decoder_cache,
684
+ all_hidden_states,
685
+ all_self_attentions,
686
+ all_cross_attentions,
687
+ ]
688
+ if v is not None
689
+ )
690
+ return BaseModelOutputWithPastAndCrossAttentions(
691
+ last_hidden_state=hidden_states,
692
+ past_key_values=next_decoder_cache,
693
+ hidden_states=all_hidden_states,
694
+ attentions=all_self_attentions,
695
+ cross_attentions=all_cross_attentions,
696
+ )
697
+
698
+
699
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaPooler
700
+ class AltRobertaPooler(nn.Module):
701
+ def __init__(self, config):
702
+ super().__init__()
703
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
704
+ self.activation = nn.Tanh()
705
+
706
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
707
+ # We "pool" the model by simply taking the hidden state corresponding
708
+ # to the first token.
709
+ first_token_tensor = hidden_states[:, 0]
710
+ pooled_output = self.dense(first_token_tensor)
711
+ pooled_output = self.activation(pooled_output)
712
+ return pooled_output
713
+
714
+
715
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->AltCLIP
716
+ class AltCLIPAttention(nn.Module):
717
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
718
+
719
+ def __init__(self, config):
720
+ super().__init__()
721
+ self.config = config
722
+ self.embed_dim = config.hidden_size
723
+ self.num_heads = config.num_attention_heads
724
+ self.head_dim = self.embed_dim // self.num_heads
725
+ if self.head_dim * self.num_heads != self.embed_dim:
726
+ raise ValueError(
727
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
728
+ f" {self.num_heads})."
729
+ )
730
+ self.scale = self.head_dim**-0.5
731
+ self.dropout = config.attention_dropout
732
+
733
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
734
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
735
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
736
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
737
+
738
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
739
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
740
+
741
+ def forward(
742
+ self,
743
+ hidden_states: torch.Tensor,
744
+ attention_mask: Optional[torch.Tensor] = None,
745
+ causal_attention_mask: Optional[torch.Tensor] = None,
746
+ output_attentions: Optional[bool] = False,
747
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
748
+ """Input shape: Batch x Time x Channel"""
749
+
750
+ bsz, tgt_len, embed_dim = hidden_states.size()
751
+
752
+ # get query proj
753
+ query_states = self.q_proj(hidden_states) * self.scale
754
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
755
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
756
+
757
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
758
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
759
+ key_states = key_states.view(*proj_shape)
760
+ value_states = value_states.view(*proj_shape)
761
+
762
+ src_len = key_states.size(1)
763
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
764
+
765
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
766
+ raise ValueError(
767
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
768
+ f" {attn_weights.size()}"
769
+ )
770
+
771
+ # apply the causal_attention_mask first
772
+ if causal_attention_mask is not None:
773
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
774
+ raise ValueError(
775
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
776
+ f" {causal_attention_mask.size()}"
777
+ )
778
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
779
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
780
+
781
+ if attention_mask is not None:
782
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
783
+ raise ValueError(
784
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
785
+ )
786
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
787
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
788
+
789
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
790
+
791
+ if output_attentions:
792
+ # this operation is a bit awkward, but it's required to
793
+ # make sure that attn_weights keeps its gradient.
794
+ # In order to do so, attn_weights have to be reshaped
795
+ # twice and have to be reused in the following
796
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
797
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
798
+ else:
799
+ attn_weights_reshaped = None
800
+
801
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
802
+
803
+ attn_output = torch.bmm(attn_probs, value_states)
804
+
805
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
806
+ raise ValueError(
807
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
808
+ f" {attn_output.size()}"
809
+ )
810
+
811
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
812
+ attn_output = attn_output.transpose(1, 2)
813
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
814
+
815
+ attn_output = self.out_proj(attn_output)
816
+
817
+ return attn_output, attn_weights_reshaped
818
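+ # Illustrative note: unlike AltRobertaSelfAttention above, this module folds the 1/sqrt(head_dim) scaling
+ # into the query projection and runs a single batched bmm over (bsz * num_heads, len, head_dim) tensors:
+ #
+ #     # attn = torch.bmm(q_scaled, k.transpose(1, 2))   # (bsz * num_heads, tgt_len, src_len)
+ #     # out = torch.bmm(attn.softmax(dim=-1), v)        # (bsz * num_heads, tgt_len, head_dim)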
+
819
+
820
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->AltCLIP
821
+ class AltCLIPMLP(nn.Module):
822
+ def __init__(self, config):
823
+ super().__init__()
824
+ self.config = config
825
+ self.activation_fn = ACT2FN[config.hidden_act]
826
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
827
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
828
+
829
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
830
+ hidden_states = self.fc1(hidden_states)
831
+ hidden_states = self.activation_fn(hidden_states)
832
+ hidden_states = self.fc2(hidden_states)
833
+ return hidden_states
834
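+ # Illustration: fc1 expands hidden_size -> intermediate_size, the activation named by config.hidden_act is
+ # applied elementwise, and fc2 projects back to hidden_size; note there is no dropout inside this MLP.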
+
835
+
836
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->AltCLIP
837
+ class AltCLIPEncoderLayer(nn.Module):
838
+ def __init__(self, config: AltCLIPConfig):
839
+ super().__init__()
840
+ self.embed_dim = config.hidden_size
841
+ self.self_attn = AltCLIPAttention(config)
842
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
843
+ self.mlp = AltCLIPMLP(config)
844
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
845
+
846
+ def forward(
847
+ self,
848
+ hidden_states: torch.Tensor,
849
+ attention_mask: torch.Tensor,
850
+ causal_attention_mask: torch.Tensor,
851
+ output_attentions: Optional[bool] = False,
852
+ ) -> Tuple[torch.FloatTensor]:
853
+ """
854
+ Args:
855
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
856
+ attention_mask (`torch.FloatTensor`): attention mask of size
857
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
858
859
+ output_attentions (`bool`, *optional*):
860
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
861
+ returned tensors for more detail.
862
+ """
863
+ residual = hidden_states
864
+
865
+ hidden_states = self.layer_norm1(hidden_states)
866
+ hidden_states, attn_weights = self.self_attn(
867
+ hidden_states=hidden_states,
868
+ attention_mask=attention_mask,
869
+ causal_attention_mask=causal_attention_mask,
870
+ output_attentions=output_attentions,
871
+ )
872
+ hidden_states = residual + hidden_states
873
+
874
+ residual = hidden_states
875
+ hidden_states = self.layer_norm2(hidden_states)
876
+ hidden_states = self.mlp(hidden_states)
877
+ hidden_states = residual + hidden_states
878
+
879
+ outputs = (hidden_states,)
880
+
881
+ if output_attentions:
882
+ outputs += (attn_weights,)
883
+
884
+ return outputs
885
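+ # Structure note (illustration): the layer is pre-norm, i.e. x = x + self_attn(layer_norm1(x)) followed by
+ # x = x + mlp(layer_norm2(x)), so the residual stream keeps a constant width of config.hidden_size.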
+
886
+
887
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->AltCLIP
888
+ class AltCLIPEncoder(nn.Module):
889
+ """
890
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
891
+ [`AltCLIPEncoderLayer`].
892
+
893
+ Args:
894
+ config: AltCLIPConfig
895
+ """
896
+
897
+ def __init__(self, config: AltCLIPConfig):
898
+ super().__init__()
899
+ self.config = config
900
+ self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
901
+ self.gradient_checkpointing = False
902
+
903
+ def forward(
904
+ self,
905
+ inputs_embeds,
906
+ attention_mask: Optional[torch.Tensor] = None,
907
+ causal_attention_mask: Optional[torch.Tensor] = None,
908
+ output_attentions: Optional[bool] = None,
909
+ output_hidden_states: Optional[bool] = None,
910
+ return_dict: Optional[bool] = None,
911
+ ) -> Union[Tuple, BaseModelOutput]:
912
+ r"""
913
+ Args:
914
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
915
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
916
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
917
+ than the model's internal embedding lookup matrix.
918
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
919
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
920
+
921
+ - 1 for tokens that are **not masked**,
922
+ - 0 for tokens that are **masked**.
923
+
924
+ [What are attention masks?](../glossary#attention-mask)
925
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
926
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
927
+
928
+ - 1 for tokens that are **not masked**,
929
+ - 0 for tokens that are **masked**.
930
+
931
+ [What are attention masks?](../glossary#attention-mask)
932
+ output_attentions (`bool`, *optional*):
933
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
934
+ returned tensors for more detail.
935
+ output_hidden_states (`bool`, *optional*):
936
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
937
+ for more detail.
938
+ return_dict (`bool`, *optional*):
939
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
940
+ """
941
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
942
+ output_hidden_states = (
943
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
944
+ )
945
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
946
+
947
+ encoder_states = () if output_hidden_states else None
948
+ all_attentions = () if output_attentions else None
949
+
950
+ hidden_states = inputs_embeds
951
+ for idx, encoder_layer in enumerate(self.layers):
952
+ if output_hidden_states:
953
+ encoder_states = encoder_states + (hidden_states,)
954
+ if self.gradient_checkpointing and self.training:
955
+ layer_outputs = self._gradient_checkpointing_func(
956
+ encoder_layer.__call__,
957
+ hidden_states,
958
+ attention_mask,
959
+ causal_attention_mask,
960
+ output_attentions,
961
+ )
962
+ else:
963
+ layer_outputs = encoder_layer(
964
+ hidden_states,
965
+ attention_mask,
966
+ causal_attention_mask,
967
+ output_attentions=output_attentions,
968
+ )
969
+
970
+ hidden_states = layer_outputs[0]
971
+
972
+ if output_attentions:
973
+ all_attentions = all_attentions + (layer_outputs[1],)
974
+
975
+ if output_hidden_states:
976
+ encoder_states = encoder_states + (hidden_states,)
977
+
978
+ if not return_dict:
979
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
980
+ return BaseModelOutput(
981
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
982
+ )
983
+
984
+
985
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->AltCLIP
986
+ class AltCLIPVisionEmbeddings(nn.Module):
987
+ def __init__(self, config: AltCLIPVisionConfig):
988
+ super().__init__()
989
+ self.config = config
990
+ self.embed_dim = config.hidden_size
991
+ self.image_size = config.image_size
992
+ self.patch_size = config.patch_size
993
+
994
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
995
+
996
+ self.patch_embedding = nn.Conv2d(
997
+ in_channels=config.num_channels,
998
+ out_channels=self.embed_dim,
999
+ kernel_size=self.patch_size,
1000
+ stride=self.patch_size,
1001
+ bias=False,
1002
+ )
1003
+
1004
+ self.num_patches = (self.image_size // self.patch_size) ** 2
1005
+ self.num_positions = self.num_patches + 1
1006
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
1007
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
1008
+
1009
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
1010
+ batch_size = pixel_values.shape[0]
1011
+ target_dtype = self.patch_embedding.weight.dtype
1012
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
1013
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
1014
+
1015
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
1016
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
1017
+ embeddings = embeddings + self.position_embedding(self.position_ids)
1018
+ return embeddings
1019
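+ # Worked example (the values are assumptions, not guaranteed defaults): with image_size = 224 and
+ # patch_size = 14, num_patches = (224 // 14) ** 2 = 256, so the encoder receives num_positions = 257
+ # tokens per image (one class embedding followed by 256 patch embeddings), each of width hidden_size.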
+
1020
+
1021
+ class AltCLIPPreTrainedModel(PreTrainedModel):
1022
+ """
1023
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1024
+ models.
1025
+ """
1026
+
1027
+ config_class = AltCLIPConfig
1028
+ base_model_prefix = "altclip"
1029
+ supports_gradient_checkpointing = True
1030
+
1031
+ def _init_weights(self, module):
1032
+ """Initialize the weights"""
1033
+ factor = self.config.initializer_factor
1034
+ if isinstance(module, AltCLIPVisionEmbeddings):
1035
+ factor = self.config.initializer_factor
1036
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
1037
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
1038
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
1039
+ elif isinstance(module, AltCLIPAttention):
1040
+ factor = self.config.initializer_factor
1041
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1042
+ out_proj_std = (module.embed_dim**-0.5) * factor
1043
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
1044
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
1045
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
1046
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
1047
+ elif isinstance(module, AltCLIPMLP):
1048
+ factor = self.config.initializer_factor
1049
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1050
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
1051
+ nn.init.normal_(module.fc1.weight, std=fc_std)
1052
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
1053
+ elif isinstance(module, AltCLIPModel):
1054
+ nn.init.normal_(
1055
+ module.text_projection.weight,
1056
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
1057
+ )
1058
+ module.text_projection._is_hf_initialized = True
1059
+ nn.init.normal_(
1060
+ module.visual_projection.weight,
1061
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
1062
+ )
1063
+ module.visual_projection._is_hf_initialized = True
1064
+ elif isinstance(module, nn.LayerNorm):
1065
+ module.bias.data.zero_()
1066
+ module.weight.data.fill_(1.0)
1067
+ elif isinstance(module, nn.Linear):
1068
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor)
1069
+ if module.bias is not None:
1070
+ module.bias.data.zero_()
1071
+ elif isinstance(module, nn.Embedding):
1072
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor)
1073
+ if module.padding_idx is not None:
1074
+ module.weight.data[module.padding_idx].zero_()
1075
+
1076
+
1077
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer with CLIPVisionTransformer->AltCLIPVisionTransformer,CLIPVisionConfig->AltCLIPVisionConfig,CLIPVisionEmbeddings->AltCLIPVisionEmbeddings,CLIPEncoder->AltCLIPEncoder,CLIP_VISION_INPUTS_DOCSTRING->ALTCLIP_VISION_INPUTS_DOCSTRING
1078
+ class AltCLIPVisionTransformer(nn.Module):
1079
+ def __init__(self, config: AltCLIPVisionConfig):
1080
+ super().__init__()
1081
+ self.config = config
1082
+ embed_dim = config.hidden_size
1083
+
1084
+ self.embeddings = AltCLIPVisionEmbeddings(config)
1085
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1086
+ self.encoder = AltCLIPEncoder(config)
1087
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1088
+
1089
+ @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING)
1090
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig)
1091
+ def forward(
1092
+ self,
1093
+ pixel_values: Optional[torch.FloatTensor] = None,
1094
+ output_attentions: Optional[bool] = None,
1095
+ output_hidden_states: Optional[bool] = None,
1096
+ return_dict: Optional[bool] = None,
1097
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1098
+ r"""
1099
+ Returns:
1100
+
1101
+ """
1102
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1103
+ output_hidden_states = (
1104
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1105
+ )
1106
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1107
+
1108
+ if pixel_values is None:
1109
+ raise ValueError("You have to specify pixel_values")
1110
+
1111
+ hidden_states = self.embeddings(pixel_values)
1112
+ hidden_states = self.pre_layrnorm(hidden_states)
1113
+
1114
+ encoder_outputs = self.encoder(
1115
+ inputs_embeds=hidden_states,
1116
+ output_attentions=output_attentions,
1117
+ output_hidden_states=output_hidden_states,
1118
+ return_dict=return_dict,
1119
+ )
1120
+
1121
+ last_hidden_state = encoder_outputs[0]
1122
+ pooled_output = last_hidden_state[:, 0, :]
1123
+ pooled_output = self.post_layernorm(pooled_output)
1124
+
1125
+ if not return_dict:
1126
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1127
+
1128
+ return BaseModelOutputWithPooling(
1129
+ last_hidden_state=last_hidden_state,
1130
+ pooler_output=pooled_output,
1131
+ hidden_states=encoder_outputs.hidden_states,
1132
+ attentions=encoder_outputs.attentions,
1133
+ )
1134
+
1135
+
1136
+ class AltCLIPVisionModel(AltCLIPPreTrainedModel):
1137
+ config_class = AltCLIPVisionConfig
1138
+ main_input_name = "pixel_values"
1139
+
1140
+ def __init__(self, config: AltCLIPVisionConfig):
1141
+ super().__init__(config)
1142
+ self.vision_model = AltCLIPVisionTransformer(config)
1143
+ # Initialize weights and apply final processing
1144
+ self.post_init()
1145
+
1146
+ def get_input_embeddings(self) -> nn.Module:
1147
+ return self.vision_model.embeddings.patch_embedding
1148
+
1149
+ @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING)
1150
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig)
1151
+ def forward(
1152
+ self,
1153
+ pixel_values: Optional[torch.FloatTensor] = None,
1154
+ output_attentions: Optional[bool] = None,
1155
+ output_hidden_states: Optional[bool] = None,
1156
+ return_dict: Optional[bool] = None,
1157
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1158
+ r"""
1159
+ Returns:
1160
+
1161
+ Examples:
1162
+
1163
+ ```python
1164
+ >>> from PIL import Image
1165
+ >>> import requests
1166
+ >>> from transformers import AutoProcessor, AltCLIPVisionModel
1167
+
1168
+ >>> model = AltCLIPVisionModel.from_pretrained("BAAI/AltCLIP")
1169
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1170
+
1171
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1172
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1173
+
1174
+ >>> inputs = processor(images=image, return_tensors="pt")
1175
+
1176
+ >>> outputs = model(**inputs)
1177
+ >>> last_hidden_state = outputs.last_hidden_state
1178
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1179
+ ```"""
1180
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1181
+
1182
+ return self.vision_model(
1183
+ pixel_values=pixel_values,
1184
+ output_attentions=output_attentions,
1185
+ output_hidden_states=output_hidden_states,
1186
+ return_dict=return_dict,
1187
+ )
1188
+
1189
+
1190
+ class AltRobertaModel(AltCLIPPreTrainedModel):
1191
+ """
1192
+
1193
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
1194
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
1195
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
1196
+ Kaiser and Illia Polosukhin.
1197
+
1198
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
1199
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
1200
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
1201
+
1202
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
1203
+
1204
+ """
1205
+
1206
+ config_class = AltCLIPTextConfig
1207
+
1208
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->AltRoberta
1209
+ def __init__(self, config, add_pooling_layer=True):
1210
+ super().__init__(config)
1211
+ self.config = config
1212
+
1213
+ self.embeddings = AltRobertaEmbeddings(config)
1214
+ self.encoder = AltRobertaEncoder(config)
1215
+
1216
+ self.pooler = AltRobertaPooler(config) if add_pooling_layer else None
1217
+
1218
+ # Initialize weights and apply final processing
1219
+ self.post_init()
1220
+
1221
+ def get_input_embeddings(self):
1222
+ return self.embeddings.word_embeddings
1223
+
1224
+ def set_input_embeddings(self, value):
1225
+ self.embeddings.word_embeddings = value
1226
+
1227
+ def _prune_heads(self, heads_to_prune):
1228
+ """
1229
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1230
+ class PreTrainedModel
1231
+ """
1232
+ for layer, heads in heads_to_prune.items():
1233
+ self.encoder.layer[layer].attention.prune_heads(heads)
1234
+
1235
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
1236
+ def forward(
1237
+ self,
1238
+ input_ids: Optional[torch.Tensor] = None,
1239
+ attention_mask: Optional[torch.Tensor] = None,
1240
+ token_type_ids: Optional[torch.Tensor] = None,
1241
+ position_ids: Optional[torch.Tensor] = None,
1242
+ head_mask: Optional[torch.Tensor] = None,
1243
+ inputs_embeds: Optional[torch.Tensor] = None,
1244
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1245
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1246
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1247
+ use_cache: Optional[bool] = None,
1248
+ output_attentions: Optional[bool] = None,
1249
+ output_hidden_states: Optional[bool] = None,
1250
+ return_dict: Optional[bool] = None,
1251
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
1252
+ r"""
1253
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1254
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1255
+ the model is configured as a decoder.
1256
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1257
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1258
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1259
+
1260
+ - 1 for tokens that are **not masked**,
1261
+ - 0 for tokens that are **masked**.
1262
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1263
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1264
+
1265
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1266
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1267
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1268
+ use_cache (`bool`, *optional*):
1269
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1270
+ `past_key_values`).
1271
+ """
1272
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1273
+ output_hidden_states = (
1274
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1275
+ )
1276
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1277
+
1278
+ if self.config.is_decoder:
1279
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1280
+ else:
1281
+ use_cache = False
1282
+
1283
+ if input_ids is not None and inputs_embeds is not None:
1284
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1285
+ elif input_ids is not None:
1286
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1287
+ input_shape = input_ids.size()
1288
+ elif inputs_embeds is not None:
1289
+ input_shape = inputs_embeds.size()[:-1]
1290
+ else:
1291
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1292
+
1293
+ batch_size, seq_length = input_shape
1294
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1295
+
1296
+ # past_key_values_length
1297
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1298
+
1299
+ if attention_mask is None:
1300
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
1301
+
1302
+ if token_type_ids is None:
1303
+ if hasattr(self.embeddings, "token_type_ids"):
1304
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
1305
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
1306
+ token_type_ids = buffered_token_type_ids_expanded
1307
+ else:
1308
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1309
+
1310
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1311
+ # ourselves in which case we just need to make it broadcastable to all heads.
1312
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
1313
+
1314
+ # If a 2D or 3D attention mask is provided for the cross-attention
1315
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1316
+ if self.config.is_decoder and encoder_hidden_states is not None:
1317
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1318
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1319
+ if encoder_attention_mask is None:
1320
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1321
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1322
+ else:
1323
+ encoder_extended_attention_mask = None
1324
+
1325
+ # Prepare head mask if needed
1326
+ # 1.0 in head_mask indicate we keep the head
1327
+ # attention_probs has shape bsz x n_heads x N x N
1328
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1329
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1330
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1331
+
1332
+ embedding_output = self.embeddings(
1333
+ input_ids=input_ids,
1334
+ position_ids=position_ids,
1335
+ token_type_ids=token_type_ids,
1336
+ inputs_embeds=inputs_embeds,
1337
+ past_key_values_length=past_key_values_length,
1338
+ )
1339
+ encoder_outputs = self.encoder(
1340
+ embedding_output,
1341
+ attention_mask=extended_attention_mask,
1342
+ head_mask=head_mask,
1343
+ encoder_hidden_states=encoder_hidden_states,
1344
+ encoder_attention_mask=encoder_extended_attention_mask,
1345
+ past_key_values=past_key_values,
1346
+ use_cache=use_cache,
1347
+ output_attentions=output_attentions,
1348
+ output_hidden_states=output_hidden_states,
1349
+ return_dict=return_dict,
1350
+ )
1351
+ sequence_output = encoder_outputs[0]
1352
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1353
+
1354
+ if not return_dict:
1355
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1356
+
1357
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1358
+ last_hidden_state=sequence_output,
1359
+ pooler_output=pooled_output,
1360
+ past_key_values=encoder_outputs.past_key_values,
1361
+ hidden_states=encoder_outputs.hidden_states,
1362
+ attentions=encoder_outputs.attentions,
1363
+ cross_attentions=encoder_outputs.cross_attentions,
1364
+ )
1365
+
1366
+
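Aside: the mask handling above relies on `get_extended_attention_mask` to turn the 2-D padding mask into something broadcastable across attention heads. A minimal, self-contained sketch of the standard additive-mask idiom (an illustration of the idea, not the library helper itself):

```python
# Sketch (assumed standard additive-mask semantics): expand a 2-D padding mask
# so it broadcasts over [batch_size, num_heads, seq_length, seq_length].
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])             # batch_size=1, seq_length=4; 0 marks padding
extended_mask = attention_mask[:, None, None, :].float()  # shape [1, 1, 1, 4] -> broadcasts to all heads
extended_mask = (1.0 - extended_mask) * torch.finfo(torch.float32).min  # 0 where attended, large negative where masked

print(extended_mask.shape)  # torch.Size([1, 1, 1, 4])
```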
1367
+ class AltCLIPTextModel(AltCLIPPreTrainedModel):
1368
+ config_class = AltCLIPTextConfig
1369
+
1370
+ def __init__(self, config):
1371
+ super().__init__(config)
1372
+ self.roberta = AltRobertaModel(config, add_pooling_layer=False)
1373
+ self.transformation = nn.Linear(config.hidden_size, config.project_dim)
1374
+ self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1375
+ self.post_init()
1376
+
1377
+ def get_input_embeddings(self) -> nn.Module:
1378
+ return self.roberta.embeddings.word_embeddings
1379
+
1380
+ def set_input_embeddings(self, value: nn.Embedding) -> None:
1381
+ self.roberta.embeddings.word_embeddings = value
1382
+
1383
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1384
+ return super().resize_token_embeddings(new_num_tokens)
1385
+
1386
+ @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING)
1387
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndProjection, config_class=AltCLIPTextConfig)
1388
+ def forward(
1389
+ self,
1390
+ input_ids: Optional[torch.Tensor] = None,
1391
+ attention_mask: Optional[torch.Tensor] = None,
1392
+ token_type_ids: Optional[torch.Tensor] = None,
1393
+ position_ids: Optional[torch.Tensor] = None,
1394
+ head_mask: Optional[torch.Tensor] = None,
1395
+ inputs_embeds: Optional[torch.Tensor] = None,
1396
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1397
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1398
+ output_attentions: Optional[bool] = None,
1399
+ return_dict: Optional[bool] = None,
1400
+ output_hidden_states: Optional[bool] = None,
1401
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndProjection]:
1402
+ r"""
1403
+ Returns:
1404
+
1405
+ Examples:
1406
+
1407
+ ```python
1408
+ >>> from transformers import AutoProcessor, AltCLIPTextModel
1409
+
1410
+ >>> model = AltCLIPTextModel.from_pretrained("BAAI/AltCLIP")
1411
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1412
+
1413
+ >>> texts = ["it's a cat", "it's a dog"]
1414
+
1415
+ >>> inputs = processor(text=texts, padding=True, return_tensors="pt")
1416
+
1417
+ >>> outputs = model(**inputs)
1418
+ >>> last_hidden_state = outputs.last_hidden_state
1419
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1420
+ ```"""
1421
+
1422
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1423
+
1424
+ outputs = self.roberta(
1425
+ input_ids=input_ids,
1426
+ attention_mask=attention_mask,
1427
+ token_type_ids=token_type_ids,
1428
+ position_ids=position_ids,
1429
+ head_mask=head_mask,
1430
+ inputs_embeds=inputs_embeds,
1431
+ encoder_hidden_states=encoder_hidden_states,
1432
+ encoder_attention_mask=encoder_attention_mask,
1433
+ output_attentions=output_attentions,
1434
+ output_hidden_states=output_hidden_states,
1435
+ return_dict=return_dict,
1436
+ )
1437
+
1438
+ # last module outputs
1439
+ sequence_output = outputs[0]
1440
+
1441
+ # apply layer norm before the projection
1442
+ sequence_output = self.pre_LN(sequence_output)
1443
+
1444
+ # pooler
1445
+ projection_state = self.transformation(sequence_output)
1446
+ pooler_output = projection_state[:, 0]
1447
+
1448
+ if not return_dict:
1449
+ return (projection_state, pooler_output) + outputs[2:4]
1450
+
1451
+ return BaseModelOutputWithPoolingAndProjection(
1452
+ last_hidden_state=projection_state,
1453
+ pooler_output=pooler_output,
1454
+ hidden_states=outputs.hidden_states,
1455
+ attentions=outputs.attentions,
1456
+ )
1457
+
1458
+
1459
+ class AltCLIPModel(AltCLIPPreTrainedModel):
1460
+ config_class = AltCLIPConfig
1461
+
1462
+ def __init__(self, config: AltCLIPConfig):
1463
+ super().__init__(config)
1464
+
1465
+ if not isinstance(config.vision_config, AltCLIPVisionConfig):
1466
+ raise ValueError(
1467
+ "config.vision_config is expected to be of type AltCLIPVisionConfig but is of type"
1468
+ f" {type(config.vision_config)}."
1469
+ )
1470
+ if not isinstance(config.text_config, AltCLIPTextConfig):
1471
+ raise ValueError(
1472
+ "config.text_config is expected to be of type AltCLIPTextConfig but is of type"
1473
+ f" {type(config.text_config)}."
1474
+ )
1475
+
1476
+ text_config = config.text_config
1477
+ vision_config = config.vision_config
1478
+
1479
+ self.projection_dim = config.projection_dim
1480
+ self.text_embed_dim = text_config.project_dim
1481
+ self.vision_embed_dim = vision_config.hidden_size
1482
+
1483
+ self.text_model = AltCLIPTextModel(text_config)
1484
+ self.vision_model = AltCLIPVisionTransformer(vision_config)
1485
+
1486
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
1487
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
1488
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
1489
+
1490
+ # Initialize weights and apply final processing
1491
+ self.post_init()
1492
+
1493
+ @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING)
1494
+ def get_text_features(
1495
+ self,
1496
+ input_ids: Optional[torch.Tensor] = None,
1497
+ attention_mask: Optional[torch.Tensor] = None,
1498
+ position_ids: Optional[torch.Tensor] = None,
1499
+ token_type_ids=None,
1500
+ output_attentions: Optional[bool] = None,
1501
+ output_hidden_states: Optional[bool] = None,
1502
+ return_dict: Optional[bool] = None,
1503
+ ) -> torch.FloatTensor:
1504
+ r"""
1505
+ Returns:
1506
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1507
+ applying the projection layer to the pooled output of [`AltCLIPTextModel`].
1508
+
1509
+ Examples:
1510
+
1511
+ ```python
1512
+ >>> from transformers import AutoProcessor, AltCLIPModel
1513
+
1514
+ >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
1515
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1516
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1517
+ >>> text_features = model.get_text_features(**inputs)
1518
+ ```"""
1519
+ # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components.
1520
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1521
+ output_hidden_states = (
1522
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1523
+ )
1524
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1525
+
1526
+ text_outputs = self.text_model(
1527
+ input_ids=input_ids,
1528
+ attention_mask=attention_mask,
1529
+ position_ids=position_ids,
1530
+ token_type_ids=token_type_ids,
1531
+ output_attentions=output_attentions,
1532
+ output_hidden_states=output_hidden_states,
1533
+ return_dict=return_dict,
1534
+ )
1535
+ pooled_output = text_outputs[1]
1536
+ text_features = self.text_projection(pooled_output)
1537
+
1538
+ return text_features
1539
+
1540
+ @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING)
1541
+ def get_image_features(
1542
+ self,
1543
+ pixel_values: Optional[torch.FloatTensor] = None,
1544
+ output_attentions: Optional[bool] = None,
1545
+ output_hidden_states: Optional[bool] = None,
1546
+ return_dict: Optional[bool] = None,
1547
+ ) -> torch.FloatTensor:
1548
+ r"""
1549
+ Returns:
1550
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1551
+ applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
1552
+
1553
+ Examples:
1554
+
1555
+ ```python
1556
+ >>> from PIL import Image
1557
+ >>> import requests
1558
+ >>> from transformers import AutoProcessor, AltCLIPModel
1559
+
1560
+ >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
1561
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1562
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1563
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1564
+ >>> inputs = processor(images=image, return_tensors="pt")
1565
+ >>> image_features = model.get_image_features(**inputs)
1566
+ ```"""
1567
+ # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components.
1568
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1569
+ output_hidden_states = (
1570
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1571
+ )
1572
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1573
+
1574
+ vision_outputs = self.vision_model(
1575
+ pixel_values=pixel_values,
1576
+ output_attentions=output_attentions,
1577
+ output_hidden_states=output_hidden_states,
1578
+ return_dict=return_dict,
1579
+ )
1580
+
1581
+ pooled_output = vision_outputs[1] # pooled_output
1582
+ image_features = self.visual_projection(pooled_output)
1583
+
1584
+ return image_features
1585
+
1586
+ @add_start_docstrings_to_model_forward(ALTCLIP_INPUTS_DOCSTRING)
1587
+ @replace_return_docstrings(output_type=AltCLIPOutput, config_class=AltCLIPConfig)
1588
+ def forward(
1589
+ self,
1590
+ input_ids: Optional[torch.LongTensor] = None,
1591
+ pixel_values: Optional[torch.FloatTensor] = None,
1592
+ attention_mask: Optional[torch.Tensor] = None,
1593
+ position_ids: Optional[torch.LongTensor] = None,
1594
+ token_type_ids: Optional[torch.Tensor] = None,
1595
+ return_loss: Optional[bool] = None,
1596
+ output_attentions: Optional[bool] = None,
1597
+ output_hidden_states: Optional[bool] = None,
1598
+ return_dict: Optional[bool] = None,
1599
+ ) -> Union[Tuple, AltCLIPOutput]:
1600
+ r"""
1601
+ Returns:
1602
+
1603
+ Examples:
1604
+
1605
+ ```python
1606
+ >>> from PIL import Image
1607
+ >>> import requests
1608
+ >>> from transformers import AutoProcessor, AltCLIPModel
1609
+
1610
+ >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
1611
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1612
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1613
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1614
+ >>> inputs = processor(
1615
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1616
+ ... )
1617
+ >>> outputs = model(**inputs)
1618
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1619
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1620
+ ```"""
1621
+ # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components.
1622
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1623
+ output_hidden_states = (
1624
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1625
+ )
1626
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1627
+
1628
+ text_outputs = self.text_model(
1629
+ input_ids=input_ids,
1630
+ attention_mask=attention_mask,
1631
+ token_type_ids=token_type_ids,
1632
+ position_ids=position_ids,
1633
+ output_attentions=output_attentions,
1634
+ output_hidden_states=output_hidden_states,
1635
+ return_dict=return_dict,
1636
+ )
1637
+
1638
+ vision_outputs = self.vision_model(
1639
+ pixel_values=pixel_values,
1640
+ output_attentions=output_attentions,
1641
+ output_hidden_states=output_hidden_states,
1642
+ return_dict=return_dict,
1643
+ )
1644
+
1645
+ image_embeds = vision_outputs[1]
1646
+ image_embeds = self.visual_projection(image_embeds)
1647
+
1648
+ text_embeds = text_outputs[1]
1649
+ text_embeds = self.text_projection(text_embeds)
1650
+
1651
+ # normalized features
1652
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1653
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1654
+
1655
+ # cosine similarity as logits
1656
+ logit_scale = self.logit_scale.exp()
1657
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1658
+ logits_per_image = logits_per_text.T
1659
+
1660
+ loss = None
1661
+ if return_loss:
1662
+ loss = clip_loss(logits_per_text)
1663
+
1664
+ if not return_dict:
1665
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1666
+ return ((loss,) + output) if loss is not None else output
1667
+
1668
+ return AltCLIPOutput(
1669
+ loss=loss,
1670
+ logits_per_image=logits_per_image,
1671
+ logits_per_text=logits_per_text,
1672
+ text_embeds=text_embeds,
1673
+ image_embeds=image_embeds,
1674
+ text_model_output=text_outputs,
1675
+ vision_model_output=vision_outputs,
1676
+ )
1677
+
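Aside: `clip_loss`, called in `forward` above, is defined earlier in `modeling_altclip.py` and is not shown in this hunk. Assuming it follows the standard CLIP symmetric contrastive formulation, a minimal sketch looks like this:

```python
# Hedged sketch of a CLIP-style symmetric contrastive loss (assumed to match
# the clip_loss helper referenced above, which lives earlier in this file).
import torch
import torch.nn.functional as F

def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # Cross-entropy against the diagonal: the i-th text matches the i-th image.
    return F.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

def symmetric_clip_loss(logits_per_text: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(logits_per_text)
    image_loss = contrastive_loss(logits_per_text.t())
    return (caption_loss + image_loss) / 2.0

loss = symmetric_clip_loss(torch.randn(4, 4))  # 4 text-image pairs
```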
1678
+
1679
+ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
1680
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1681
+ """
1682
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1683
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1684
+
1685
+ Args:
1686
+ input_ids (`torch.Tensor`): Indices of input sequence tokens; padding positions are ignored.
1687
+
1688
+ Returns: torch.Tensor
1689
+ """
1690
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1691
+ mask = input_ids.ne(padding_idx).int()
1692
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1693
+ return incremental_indices.long() + padding_idx
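Aside: a small worked example of the position-id scheme above, assuming `padding_idx = 1` (the usual RoBERTa convention; the value is an assumption, not taken from this hunk):

```python
# Worked example: real tokens count up from padding_idx + 1, padding keeps padding_idx.
import torch

padding_idx = 1
input_ids = torch.tensor([[5, 7, 9, 1, 1]])            # the trailing 1s are padding
mask = input_ids.ne(padding_idx).int()                 # tensor([[1, 1, 1, 0, 0]])
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
position_ids = incremental_indices.long() + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 1, 1]])
```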
venv/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for AltCLIP
17
+ """
18
+ import warnings
19
+
20
+ from ...processing_utils import ProcessorMixin
21
+ from ...tokenization_utils_base import BatchEncoding
22
+
23
+
24
+ class AltCLIPProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single
27
+ processor.
28
+
29
+ [`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See
30
+ the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information.
31
+
32
+ Args:
33
+ image_processor ([`CLIPImageProcessor`], *optional*):
34
+ The image processor is a required input.
35
+ tokenizer ([`XLMRobertaTokenizerFast`], *optional*):
36
+ The tokenizer is a required input.
37
+ """
38
+
39
+ attributes = ["image_processor", "tokenizer"]
40
+ image_processor_class = "CLIPImageProcessor"
41
+ tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
42
+
43
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
44
+ feature_extractor = None
45
+ if "feature_extractor" in kwargs:
46
+ warnings.warn(
47
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
48
+ " instead.",
49
+ FutureWarning,
50
+ )
51
+ feature_extractor = kwargs.pop("feature_extractor")
52
+
53
+ image_processor = image_processor if image_processor is not None else feature_extractor
54
+ if image_processor is None:
55
+ raise ValueError("You need to specify an `image_processor`.")
56
+ if tokenizer is None:
57
+ raise ValueError("You need to specify a `tokenizer`.")
58
+
59
+ super().__init__(image_processor, tokenizer)
60
+
61
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
62
+ """
63
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
64
+ and `kwargs` arguments to XLMRobertaTokenizerFast's [`~XLMRobertaTokenizerFast.__call__`] if `text` is not
65
+ `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
66
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
67
+ of the above two methods for more information.
68
+
69
+ Args:
70
+ text (`str`, `List[str]`, `List[List[str]]`):
71
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
72
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
73
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
74
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
75
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
76
+ tensor. Both channels-first and channels-last formats are supported.
77
+
78
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
79
+ If set, will return tensors of a particular framework. Acceptable values are:
80
+
81
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
82
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
83
+ - `'np'`: Return NumPy `np.ndarray` objects.
84
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
85
+
86
+ Returns:
87
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
88
+
89
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
90
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
91
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
92
+ `None`).
93
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
94
+ """
95
+
96
+ if text is None and images is None:
97
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
98
+
99
+ if text is not None:
100
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
101
+
102
+ if images is not None:
103
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
104
+
105
+ if text is not None and images is not None:
106
+ encoding["pixel_values"] = image_features.pixel_values
107
+ return encoding
108
+ elif text is not None:
109
+ return encoding
110
+ else:
111
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
112
+
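Aside: a usage sketch of the combined text-and-image call described in the docstring above. The `BAAI/AltCLIP` checkpoint name is taken from the model docstrings elsewhere in this diff; running it requires network access:

```python
# Usage sketch: one call produces input_ids/attention_mask and pixel_values in a
# single BatchEncoding, as documented above.
from PIL import Image
import requests
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```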
113
+ def batch_decode(self, *args, **kwargs):
114
+ """
115
+ This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
116
+ Please refer to the docstring of this method for more information.
117
+ """
118
+ return self.tokenizer.batch_decode(*args, **kwargs)
119
+
120
+ def decode(self, *args, **kwargs):
121
+ """
122
+ This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
123
+ refer to the docstring of this method for more information.
124
+ """
125
+ return self.tokenizer.decode(*args, **kwargs)
126
+
127
+ @property
128
+ def model_input_names(self):
129
+ tokenizer_input_names = self.tokenizer.model_input_names
130
+ image_processor_input_names = self.image_processor.model_input_names
131
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
venv/lib/python3.10/site-packages/transformers/models/cpm/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_cpm"] = ["CpmTokenizer"]
29
+
30
+ try:
31
+ if not is_tokenizers_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"]
37
+
38
+
39
+ if TYPE_CHECKING:
40
+ try:
41
+ if not is_sentencepiece_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ from .tokenization_cpm import CpmTokenizer
47
+
48
+ try:
49
+ if not is_tokenizers_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .tokenization_cpm_fast import CpmTokenizerFast
55
+
56
+ else:
57
+ import sys
58
+
59
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
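Aside: a short sketch of what the `_LazyModule` registration above means for user code, assuming `transformers` and `sentencepiece` are installed so that `CpmTokenizer` is actually exposed:

```python
# The submodule defining CpmTokenizer is only imported on first attribute access;
# the optional-dependency guards above decide whether the symbol is exposed at all.
import importlib

cpm = importlib.import_module("transformers.models.cpm")
print(type(cpm).__name__)        # _LazyModule -- a proxy, not the plain module
tokenizer_cls = cpm.CpmTokenizer # first access triggers the real import of tokenization_cpm
print(tokenizer_cls.__module__)  # transformers.models.cpm.tokenization_cpm
```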
venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (902 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc ADDED
Binary file (9.33 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py ADDED
@@ -0,0 +1,344 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+ import os
17
+ import unicodedata
18
+ from shutil import copyfile
19
+ from typing import Any, Dict, List, Optional, Tuple
20
+
21
+ import sentencepiece as spm
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import SPIECE_UNDERLINE, logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
30
+
31
+
32
+ class CpmTokenizer(PreTrainedTokenizer):
33
+ """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
34
+
35
+ vocab_files_names = VOCAB_FILES_NAMES
36
+
37
+ def __init__(
38
+ self,
39
+ vocab_file,
40
+ do_lower_case=False,
41
+ remove_space=True,
42
+ keep_accents=False,
43
+ bos_token="<s>",
44
+ eos_token="</s>",
45
+ unk_token="<unk>",
46
+ sep_token="<sep>",
47
+ pad_token="<pad>",
48
+ cls_token="<cls>",
49
+ mask_token="<mask>",
50
+ additional_special_tokens=["<eop>", "<eod>"],
51
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
52
+ **kwargs,
53
+ ) -> None:
54
+ """
55
+ Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
56
+ [SentencePiece](https://github.com/google/sentencepiece).
57
+
58
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
59
+ refer to this superclass for more information regarding those methods.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
64
+ contains the vocabulary necessary to instantiate a tokenizer.
65
+ do_lower_case (`bool`, *optional*, defaults to `False`):
66
+ Whether to lowercase the input when tokenizing.
67
+ remove_space (`bool`, *optional*, defaults to `True`):
68
+ Whether to strip the text when tokenizing (removing excess spaces before and after the string).
69
+ keep_accents (`bool`, *optional*, defaults to `False`):
70
+ Whether to keep accents when tokenizing.
71
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
72
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
73
+ token.
74
+
75
+ <Tip>
76
+
77
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
78
+ sequence. The token used is the `cls_token`.
79
+
80
+ </Tip>
81
+
82
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
83
+ The end of sequence token.
84
+
85
+ <Tip>
86
+
87
+ When building a sequence using special tokens, this is not the token that is used for the end of
88
+ sequence. The token used is the `sep_token`.
89
+
90
+ </Tip>
91
+
92
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
93
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
94
+ this token instead.
95
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
96
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
97
+ for sequence classification or for a text and a question for question answering. It is also used as the
98
+ last token of a sequence built with special tokens.
99
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
100
+ The token used for padding, for example when batching sequences of different lengths.
101
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
102
+ The classifier token which is used when doing sequence classification (classification of the whole
103
+ sequence instead of per-token classification). It is the first token of the sequence when built with
104
+ special tokens.
105
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
106
+ The token used for masking values. This is the token used when training this model with masked language
107
+ modeling. This is the token which the model will try to predict.
108
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
109
+ Additional special tokens used by the tokenizer.
110
+
111
+ Attributes:
112
+ sp_model (`SentencePieceProcessor`):
113
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
114
+ """
115
+ # Mask token behaves like a normal word, i.e. includes the space before it
116
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
117
+
118
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
119
+
120
+ self.do_lower_case = do_lower_case
121
+ self.remove_space = remove_space
122
+ self.keep_accents = keep_accents
123
+ self.vocab_file = vocab_file
124
+
125
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
126
+ self.sp_model.Load(vocab_file)
127
+
128
+ try:
129
+ import jieba
130
+ except ModuleNotFoundError as error:
131
+ raise error.__class__(
132
+ "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
133
+ "See https://pypi.org/project/jieba/ for installation."
134
+ )
135
+ self.jieba = jieba
136
+ self.translator = str.maketrans(" \n", "\u2582\u2583")
137
+
138
+ super().__init__(
139
+ do_lower_case=do_lower_case,
140
+ remove_space=remove_space,
141
+ keep_accents=keep_accents,
142
+ bos_token=bos_token,
143
+ eos_token=eos_token,
144
+ unk_token=unk_token,
145
+ sep_token=sep_token,
146
+ pad_token=pad_token,
147
+ cls_token=cls_token,
148
+ mask_token=mask_token,
149
+ additional_special_tokens=additional_special_tokens,
150
+ sp_model_kwargs=self.sp_model_kwargs,
151
+ **kwargs,
152
+ )
153
+
154
+ self._pad_token_type_id = 3
155
+
156
+ @property
157
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
158
+ def vocab_size(self):
159
+ return len(self.sp_model)
160
+
161
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab
162
+ def get_vocab(self):
163
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
164
+ vocab.update(self.added_tokens_encoder)
165
+ return vocab
166
+
167
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__
168
+ def __getstate__(self):
169
+ state = self.__dict__.copy()
170
+ state["sp_model"] = None
171
+ return state
172
+
173
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__
174
+ def __setstate__(self, d):
175
+ self.__dict__ = d
176
+
177
+ # for backward compatibility
178
+ if not hasattr(self, "sp_model_kwargs"):
179
+ self.sp_model_kwargs = {}
180
+
181
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
182
+ self.sp_model.Load(self.vocab_file)
183
+
184
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text
185
+ def preprocess_text(self, inputs):
186
+ if self.remove_space:
187
+ outputs = " ".join(inputs.strip().split())
188
+ else:
189
+ outputs = inputs
190
+ outputs = outputs.replace("``", '"').replace("''", '"')
191
+
192
+ if not self.keep_accents:
193
+ outputs = unicodedata.normalize("NFKD", outputs)
194
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
195
+ if self.do_lower_case:
196
+ outputs = outputs.lower()
197
+
198
+ return outputs
199
+
200
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._tokenize
201
+ def _tokenize(self, text: str) -> List[str]:
202
+ """Tokenize a string."""
203
+ text = self.preprocess_text(text)
204
+ pieces = self.sp_model.encode(text, out_type=str)
205
+ new_pieces = []
206
+ for piece in pieces:
207
+ if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
208
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
209
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
210
+ if len(cur_pieces[0]) == 1:
211
+ cur_pieces = cur_pieces[1:]
212
+ else:
213
+ cur_pieces[0] = cur_pieces[0][1:]
214
+ cur_pieces.append(piece[-1])
215
+ new_pieces.extend(cur_pieces)
216
+ else:
217
+ new_pieces.append(piece)
218
+
219
+ return new_pieces
220
+
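Aside: the digit-comma branch in `_tokenize` above splits a trailing comma off pieces such as `▁2,` so the digit and the punctuation end up as separate pieces. A simplified, model-free illustration (the piece value is hypothetical and `sp_model.EncodeAsPieces` is not actually called here):

```python
# Illustrates only the condition and the split; the real method re-encodes the
# digit with SentencePiece before appending the comma back.
SPIECE_UNDERLINE = "\u2581"  # the SentencePiece meta symbol

piece = SPIECE_UNDERLINE + "2,"
if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
    # split the trailing comma off so digit and punctuation become separate pieces
    print([piece[:-1], piece[-1]])  # ['▁2', ',']
```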
221
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_token_to_id
222
+ def _convert_token_to_id(self, token):
223
+ """Converts a token (str) in an id using the vocab."""
224
+ return self.sp_model.PieceToId(token)
225
+
226
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_id_to_token
227
+ def _convert_id_to_token(self, index):
228
+ """Converts an index (integer) in a token (str) using the vocab."""
229
+ return self.sp_model.IdToPiece(index)
230
+
231
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string
232
+ def convert_tokens_to_string(self, tokens):
233
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
234
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
235
+ return out_string
236
+
237
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens
238
+ def build_inputs_with_special_tokens(
239
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
240
+ ) -> List[int]:
241
+ """
242
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
243
+ adding special tokens. An XLNet sequence has the following format:
244
+
245
+ - single sequence: `X <sep> <cls>`
246
+ - pair of sequences: `A <sep> B <sep> <cls>`
247
+
248
+ Args:
249
+ token_ids_0 (`List[int]`):
250
+ List of IDs to which the special tokens will be added.
251
+ token_ids_1 (`List[int]`, *optional*):
252
+ Optional second list of IDs for sequence pairs.
253
+
254
+ Returns:
255
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
256
+ """
257
+ sep = [self.sep_token_id]
258
+ cls = [self.cls_token_id]
259
+ if token_ids_1 is None:
260
+ return token_ids_0 + sep + cls
261
+ return token_ids_0 + sep + token_ids_1 + sep + cls
262
+
263
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask
264
+ def get_special_tokens_mask(
265
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
266
+ ) -> List[int]:
267
+ """
268
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
269
+ special tokens using the tokenizer `prepare_for_model` method.
270
+
271
+ Args:
272
+ token_ids_0 (`List[int]`):
273
+ List of IDs.
274
+ token_ids_1 (`List[int]`, *optional*):
275
+ Optional second list of IDs for sequence pairs.
276
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
277
+ Whether or not the token list is already formatted with special tokens for the model.
278
+
279
+ Returns:
280
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
281
+ """
282
+
283
+ if already_has_special_tokens:
284
+ return super().get_special_tokens_mask(
285
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
286
+ )
287
+
288
+ if token_ids_1 is not None:
289
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
290
+ return ([0] * len(token_ids_0)) + [1, 1]
291
+
292
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences
293
+ def create_token_type_ids_from_sequences(
294
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
295
+ ) -> List[int]:
296
+ """
297
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
298
+ sequence pair mask has the following format:
299
+
300
+ ```
301
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
302
+ | first sequence | second sequence |
303
+ ```
304
+
305
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
306
+
307
+ Args:
308
+ token_ids_0 (`List[int]`):
309
+ List of IDs.
310
+ token_ids_1 (`List[int]`, *optional*):
311
+ Optional second list of IDs for sequence pairs.
312
+
313
+ Returns:
314
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
315
+ """
316
+ sep = [self.sep_token_id]
317
+ cls_segment_id = [2]
318
+
319
+ if token_ids_1 is None:
320
+ return len(token_ids_0 + sep) * [0] + cls_segment_id
321
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
322
+
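Aside: a worked example of the pair mask built above, using hypothetical token ids and plain stand-ins for `self.sep_token_id` and the `<cls>` segment id:

```python
# The mask assigns 0 to the first sequence (plus its <sep>), 1 to the second
# sequence (plus its <sep>), and 2 to the trailing <cls> token.
token_ids_0, token_ids_1 = [10, 11], [20]
sep, cls_segment_id = ["<sep>"], [2]

token_type_ids = len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
print(token_type_ids)  # [0, 0, 0, 1, 1, 2]
```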
323
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary
324
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
325
+ if not os.path.isdir(save_directory):
326
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
327
+ return
328
+ out_vocab_file = os.path.join(
329
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
330
+ )
331
+
332
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
333
+ copyfile(self.vocab_file, out_vocab_file)
334
+ elif not os.path.isfile(self.vocab_file):
335
+ with open(out_vocab_file, "wb") as fi:
336
+ content_spiece_model = self.sp_model.serialized_model_proto()
337
+ fi.write(content_spiece_model)
338
+
339
+ return (out_vocab_file,)
340
+
341
+ def _decode(self, *args, **kwargs):
342
+ text = super()._decode(*args, **kwargs)
343
+ text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
344
+ return text
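Aside: a round-trip sketch of the space/newline remapping used by the CPM tokenizers. The translator built in `__init__` remaps spaces and newlines to U+2582/U+2583 before SentencePiece sees the text, and `_decode` above maps them back:

```python
# Round trip: encode-time remapping via str.translate, decode-time restoration
# mirroring the replace chain in _decode above.
translator = str.maketrans(" \n", "\u2582\u2583")

original = "hello world\nbye"
encoded = original.translate(translator)
decoded = encoded.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
print(decoded == original)  # True
```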
venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py ADDED
@@ -0,0 +1,237 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
27
+
28
+
29
+ class CpmTokenizerFast(PreTrainedTokenizerFast):
30
+ """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
31
+
32
+ def __init__(
33
+ self,
34
+ vocab_file=None,
35
+ tokenizer_file=None,
36
+ do_lower_case=False,
37
+ remove_space=True,
38
+ keep_accents=False,
39
+ bos_token="<s>",
40
+ eos_token="</s>",
41
+ unk_token="<unk>",
42
+ sep_token="<sep>",
43
+ pad_token="<pad>",
44
+ cls_token="<cls>",
45
+ mask_token="<mask>",
46
+ additional_special_tokens=["<eop>", "<eod>"],
47
+ **kwargs,
48
+ ):
49
+ """
50
+ Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
51
+ [SentencePiece](https://github.com/google/sentencepiece).
52
+
53
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
54
+ refer to this superclass for more information regarding those methods.
55
+
56
+ Args:
57
+ vocab_file (`str`):
58
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
59
+ contains the vocabulary necessary to instantiate a tokenizer.
60
+ do_lower_case (`bool`, *optional*, defaults to `False`):
61
+ Whether to lowercase the input when tokenizing.
62
+ remove_space (`bool`, *optional*, defaults to `True`):
63
+ Whether to strip the text when tokenizing (removing excess spaces before and after the string).
64
+ keep_accents (`bool`, *optional*, defaults to `False`):
65
+ Whether to keep accents when tokenizing.
66
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
67
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
68
+ token.
69
+
70
+ <Tip>
71
+
72
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
73
+ sequence. The token used is the `cls_token`.
74
+
75
+ </Tip>
76
+
77
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
78
+ The end of sequence token.
79
+
80
+ <Tip>
81
+
82
+ When building a sequence using special tokens, this is not the token that is used for the end of
83
+ sequence. The token used is the `sep_token`.
84
+
85
+ </Tip>
86
+
87
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
88
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
89
+ this token instead.
90
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
91
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
92
+ for sequence classification or for a text and a question for question answering. It is also used as the
93
+ last token of a sequence built with special tokens.
94
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
95
+ The token used for padding, for example when batching sequences of different lengths.
96
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
97
+ The classifier token which is used when doing sequence classification (classification of the whole
98
+ sequence instead of per-token classification). It is the first token of the sequence when built with
99
+ special tokens.
100
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
101
+ The token used for masking values. This is the token used when training this model with masked language
102
+ modeling. This is the token which the model will try to predict.
103
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
104
+ Additional special tokens used by the tokenizer.
105
+
106
+ Attributes:
107
+ sp_model (`SentencePieceProcessor`):
108
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
109
+ """
110
+ # Mask token behaves like a normal word, i.e. includes the space before it
111
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
112
+
113
+ super().__init__(
114
+ vocab_file=vocab_file,
115
+ tokenizer_file=tokenizer_file,
116
+ do_lower_case=do_lower_case,
117
+ remove_space=remove_space,
118
+ keep_accents=keep_accents,
119
+ bos_token=bos_token,
120
+ eos_token=eos_token,
121
+ unk_token=unk_token,
122
+ sep_token=sep_token,
123
+ pad_token=pad_token,
124
+ cls_token=cls_token,
125
+ mask_token=mask_token,
126
+ additional_special_tokens=additional_special_tokens,
127
+ **kwargs,
128
+ )
129
+
130
+ self._pad_token_type_id = 3
131
+ self.do_lower_case = do_lower_case
132
+ self.remove_space = remove_space
133
+ self.keep_accents = keep_accents
134
+ self.vocab_file = vocab_file
135
+
136
+ try:
137
+ import jieba
138
+ except ModuleNotFoundError as error:
139
+ raise error.__class__(
140
+ "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
141
+ "See https://pypi.org/project/jieba/ for installation."
142
+ )
143
+ self.jieba = jieba
144
+ self.translator = str.maketrans(" \n", "\u2582\u2583")
145
+
146
+ @property
147
+ def can_save_slow_tokenizer(self) -> bool:
148
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
149
+
150
+ # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens
151
+ def build_inputs_with_special_tokens(
152
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
153
+ ) -> List[int]:
154
+ """
155
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
156
+ adding special tokens. An XLNet sequence has the following format:
157
+
158
+ - single sequence: `X <sep> <cls>`
159
+ - pair of sequences: `A <sep> B <sep> <cls>`
160
+
161
+ Args:
162
+ token_ids_0 (`List[int]`):
163
+ List of IDs to which the special tokens will be added.
164
+ token_ids_1 (`List[int]`, *optional*):
165
+ Optional second list of IDs for sequence pairs.
166
+
167
+ Returns:
168
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
169
+ """
170
+ sep = [self.sep_token_id]
171
+ cls = [self.cls_token_id]
172
+ if token_ids_1 is None:
173
+ return token_ids_0 + sep + cls
174
+ return token_ids_0 + sep + token_ids_1 + sep + cls
175
+
176
+ # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences
177
+ def create_token_type_ids_from_sequences(
178
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
179
+ ) -> List[int]:
180
+ """
181
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
182
+ sequence pair mask has the following format:
183
+
184
+ ```
185
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
186
+ | first sequence | second sequence |
187
+ ```
188
+
189
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
190
+
191
+ Args:
192
+ token_ids_0 (`List[int]`):
193
+ List of IDs.
194
+ token_ids_1 (`List[int]`, *optional*):
195
+ Optional second list of IDs for sequence pairs.
196
+
197
+ Returns:
198
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
199
+ """
200
+ sep = [self.sep_token_id]
201
+ cls_segment_id = [2]
202
+
203
+ if token_ids_1 is None:
204
+ return len(token_ids_0 + sep) * [0] + cls_segment_id
205
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
206
+
207
+ # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary
208
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
209
+ if not self.can_save_slow_tokenizer:
210
+ raise ValueError(
211
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
212
+ "tokenizer."
213
+ )
214
+
215
+ if not os.path.isdir(save_directory):
216
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
217
+ return
218
+ out_vocab_file = os.path.join(
219
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
220
+ )
221
+
222
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
223
+ copyfile(self.vocab_file, out_vocab_file)
224
+
225
+ return (out_vocab_file,)
226
+
227
+ def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
228
+ batch_text_or_text_pairs = [
229
+ " ".join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)])
230
+ for text in batch_text_or_text_pairs
231
+ ]
232
+ return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)
233
+
234
+ def _decode(self, *args, **kwargs):
235
+ text = super()._decode(*args, **kwargs)
236
+ text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
237
+ return text
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"]
31
+ _import_structure["image_processing_deformable_detr"] = ["DeformableDetrImageProcessor"]
32
+
33
+ try:
34
+ if not is_torch_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["modeling_deformable_detr"] = [
40
+ "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
41
+ "DeformableDetrForObjectDetection",
42
+ "DeformableDetrModel",
43
+ "DeformableDetrPreTrainedModel",
44
+ ]
45
+
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig
49
+
50
+ try:
51
+ if not is_vision_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor
57
+ from .image_processing_deformable_detr import DeformableDetrImageProcessor
58
+
59
+ try:
60
+ if not is_torch_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ from .modeling_deformable_detr import (
66
+ DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
67
+ DeformableDetrForObjectDetection,
68
+ DeformableDetrModel,
69
+ DeformableDetrPreTrainedModel,
70
+ )
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.34 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc ADDED
Binary file (6.84 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc ADDED
Binary file (51.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc ADDED
Binary file (89.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py ADDED
@@ -0,0 +1,277 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Deformable DETR model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto import CONFIG_MAPPING
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class DeformableDetrConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate
31
+ a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a
32
+ configuration with the defaults will yield a similar configuration to that of the Deformable DETR
33
+ [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ use_timm_backbone (`bool`, *optional*, defaults to `True`):
40
+ Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
41
+ API.
42
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
43
+ The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
44
+ case it will default to `ResNetConfig()`.
45
+ num_channels (`int`, *optional*, defaults to 3):
46
+ The number of input channels.
47
+ num_queries (`int`, *optional*, defaults to 300):
48
+ Number of object queries, i.e. detection slots. This is the maximal number of objects
49
+ [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use
50
+ `two_stage_num_proposals` instead.
51
+ d_model (`int`, *optional*, defaults to 256):
52
+ Dimension of the layers.
53
+ encoder_layers (`int`, *optional*, defaults to 6):
54
+ Number of encoder layers.
55
+ decoder_layers (`int`, *optional*, defaults to 6):
56
+ Number of decoder layers.
57
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
60
+ Number of attention heads for each attention layer in the Transformer decoder.
61
+ decoder_ffn_dim (`int`, *optional*, defaults to 1024):
62
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
63
+ encoder_ffn_dim (`int`, *optional*, defaults to 1024):
64
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
65
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
66
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
67
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
68
+ dropout (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
70
+ attention_dropout (`float`, *optional*, defaults to 0.0):
71
+ The dropout ratio for the attention probabilities.
72
+ activation_dropout (`float`, *optional*, defaults to 0.0):
73
+ The dropout ratio for activations inside the fully connected layer.
74
+ init_std (`float`, *optional*, defaults to 0.02):
75
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
76
+ init_xavier_std (`float`, *optional*, defaults to 1):
77
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
78
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
79
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
80
+ for more details.
81
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
82
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
83
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
84
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
85
+ backbone (`str`, *optional*, defaults to `"resnet50"`):
86
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
87
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
88
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
89
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
90
+ Whether to use pretrained weights for the backbone.
91
+ backbone_kwargs (`dict`, *optional*):
92
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
93
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
94
+ dilation (`bool`, *optional*, defaults to `False`):
95
+ Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
96
+ `use_timm_backbone` = `True`.
97
+ class_cost (`float`, *optional*, defaults to 1):
98
+ Relative weight of the classification error in the Hungarian matching cost.
99
+ bbox_cost (`float`, *optional*, defaults to 5):
100
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
101
+ giou_cost (`float`, *optional*, defaults to 2):
102
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
103
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
104
+ Relative weight of the Focal loss in the panoptic segmentation loss.
105
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
106
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
107
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
108
+ Relative weight of the L1 bounding box loss in the object detection loss.
109
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
110
+ Relative weight of the generalized IoU loss in the object detection loss.
111
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
112
+ Relative classification weight of the 'no-object' class in the object detection loss.
113
+ num_feature_levels (`int`, *optional*, defaults to 4):
114
+ The number of input feature levels.
115
+ encoder_n_points (`int`, *optional*, defaults to 4):
116
+ The number of sampled keys in each feature level for each attention head in the encoder.
117
+ decoder_n_points (`int`, *optional*, defaults to 4):
118
+ The number of sampled keys in each feature level for each attention head in the decoder.
119
+ two_stage (`bool`, *optional*, defaults to `False`):
120
+ Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
121
+ Deformable DETR and are further fed into the decoder for iterative bounding box refinement.
122
+ two_stage_num_proposals (`int`, *optional*, defaults to 300):
123
+ The number of region proposals to be generated, in case `two_stage` is set to `True`.
124
+ with_box_refine (`bool`, *optional*, defaults to `False`):
125
+ Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
126
+ based on the predictions from the previous layer.
127
+ focal_alpha (`float`, *optional*, defaults to 0.25):
128
+ Alpha parameter in the focal loss.
129
+ disable_custom_kernels (`bool`, *optional*, defaults to `False`):
130
+ Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
131
+ kernels are not supported by PyTorch ONNX export.
132
+
133
+ Examples:
134
+
135
+ ```python
136
+ >>> from transformers import DeformableDetrConfig, DeformableDetrModel
137
+
138
+ >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration
139
+ >>> configuration = DeformableDetrConfig()
140
+
141
+ >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
142
+ >>> model = DeformableDetrModel(configuration)
143
+
144
+ >>> # Accessing the model configuration
145
+ >>> configuration = model.config
146
+ ```"""
147
+
148
+ model_type = "deformable_detr"
149
+ attribute_map = {
150
+ "hidden_size": "d_model",
151
+ "num_attention_heads": "encoder_attention_heads",
152
+ }
153
+
154
+ def __init__(
155
+ self,
156
+ use_timm_backbone=True,
157
+ backbone_config=None,
158
+ num_channels=3,
159
+ num_queries=300,
160
+ max_position_embeddings=1024,
161
+ encoder_layers=6,
162
+ encoder_ffn_dim=1024,
163
+ encoder_attention_heads=8,
164
+ decoder_layers=6,
165
+ decoder_ffn_dim=1024,
166
+ decoder_attention_heads=8,
167
+ encoder_layerdrop=0.0,
168
+ is_encoder_decoder=True,
169
+ activation_function="relu",
170
+ d_model=256,
171
+ dropout=0.1,
172
+ attention_dropout=0.0,
173
+ activation_dropout=0.0,
174
+ init_std=0.02,
175
+ init_xavier_std=1.0,
176
+ return_intermediate=True,
177
+ auxiliary_loss=False,
178
+ position_embedding_type="sine",
179
+ backbone="resnet50",
180
+ use_pretrained_backbone=True,
181
+ backbone_kwargs=None,
182
+ dilation=False,
183
+ num_feature_levels=4,
184
+ encoder_n_points=4,
185
+ decoder_n_points=4,
186
+ two_stage=False,
187
+ two_stage_num_proposals=300,
188
+ with_box_refine=False,
189
+ class_cost=1,
190
+ bbox_cost=5,
191
+ giou_cost=2,
192
+ mask_loss_coefficient=1,
193
+ dice_loss_coefficient=1,
194
+ bbox_loss_coefficient=5,
195
+ giou_loss_coefficient=2,
196
+ eos_coefficient=0.1,
197
+ focal_alpha=0.25,
198
+ disable_custom_kernels=False,
199
+ **kwargs,
200
+ ):
201
+ if not use_timm_backbone and use_pretrained_backbone:
202
+ raise ValueError(
203
+ "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
204
+ )
205
+
206
+ if backbone_config is not None and backbone is not None:
207
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
208
+
209
+ if backbone_config is not None and use_timm_backbone:
210
+ raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
211
+
212
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
213
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
214
+
215
+ if not use_timm_backbone:
216
+ if backbone_config is None:
217
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
218
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
219
+ elif isinstance(backbone_config, dict):
220
+ backbone_model_type = backbone_config.get("model_type")
221
+ config_class = CONFIG_MAPPING[backbone_model_type]
222
+ backbone_config = config_class.from_dict(backbone_config)
223
+ self.use_timm_backbone = use_timm_backbone
224
+ self.backbone_config = backbone_config
225
+ self.num_channels = num_channels
226
+ self.num_queries = num_queries
227
+ self.max_position_embeddings = max_position_embeddings
228
+ self.d_model = d_model
229
+ self.encoder_ffn_dim = encoder_ffn_dim
230
+ self.encoder_layers = encoder_layers
231
+ self.encoder_attention_heads = encoder_attention_heads
232
+ self.decoder_ffn_dim = decoder_ffn_dim
233
+ self.decoder_layers = decoder_layers
234
+ self.decoder_attention_heads = decoder_attention_heads
235
+ self.dropout = dropout
236
+ self.attention_dropout = attention_dropout
237
+ self.activation_dropout = activation_dropout
238
+ self.activation_function = activation_function
239
+ self.init_std = init_std
240
+ self.init_xavier_std = init_xavier_std
241
+ self.encoder_layerdrop = encoder_layerdrop
242
+ self.auxiliary_loss = auxiliary_loss
243
+ self.position_embedding_type = position_embedding_type
244
+ self.backbone = backbone
245
+ self.use_pretrained_backbone = use_pretrained_backbone
246
+ self.backbone_kwargs = backbone_kwargs
247
+ self.dilation = dilation
248
+ # deformable attributes
249
+ self.num_feature_levels = num_feature_levels
250
+ self.encoder_n_points = encoder_n_points
251
+ self.decoder_n_points = decoder_n_points
252
+ self.two_stage = two_stage
253
+ self.two_stage_num_proposals = two_stage_num_proposals
254
+ self.with_box_refine = with_box_refine
255
+ if two_stage is True and with_box_refine is False:
256
+ raise ValueError("If two_stage is True, with_box_refine must be True.")
257
+ # Hungarian matcher
258
+ self.class_cost = class_cost
259
+ self.bbox_cost = bbox_cost
260
+ self.giou_cost = giou_cost
261
+ # Loss coefficients
262
+ self.mask_loss_coefficient = mask_loss_coefficient
263
+ self.dice_loss_coefficient = dice_loss_coefficient
264
+ self.bbox_loss_coefficient = bbox_loss_coefficient
265
+ self.giou_loss_coefficient = giou_loss_coefficient
266
+ self.eos_coefficient = eos_coefficient
267
+ self.focal_alpha = focal_alpha
268
+ self.disable_custom_kernels = disable_custom_kernels
269
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
270
+
271
+ @property
272
+ def num_attention_heads(self) -> int:
273
+ return self.encoder_attention_heads
274
+
275
+ @property
276
+ def hidden_size(self) -> int:
277
+ return self.d_model
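
As a quick illustration of the configuration class above (not part of the diff), the sketch below instantiates a two-stage setup; `two_stage=True` requires `with_box_refine=True`, as enforced in `__init__`, and `hidden_size` / `num_attention_heads` are aliases provided by `attribute_map` and the properties at the end of the class.

```python
# A minimal sketch, assuming transformers is installed; values are the defaults
# except for the two-stage flags.
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
print(config.hidden_size)          # 256 -> aliased to d_model
print(config.num_attention_heads)  # 8   -> aliased to encoder_attention_heads
```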
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py ADDED
@@ -0,0 +1,237 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Deformable DETR checkpoints."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import cached_download, hf_hub_url
25
+ from PIL import Image
26
+
27
+ from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DeformableDetrImageProcessor
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def rename_key(orig_key):
36
+ if "backbone.0.body" in orig_key:
37
+ orig_key = orig_key.replace("backbone.0.body", "backbone.conv_encoder.model")
38
+ if "transformer" in orig_key:
39
+ orig_key = orig_key.replace("transformer.", "")
40
+ if "norm1" in orig_key:
41
+ if "encoder" in orig_key:
42
+ orig_key = orig_key.replace("norm1", "self_attn_layer_norm")
43
+ else:
44
+ orig_key = orig_key.replace("norm1", "encoder_attn_layer_norm")
45
+ if "norm2" in orig_key:
46
+ if "encoder" in orig_key:
47
+ orig_key = orig_key.replace("norm2", "final_layer_norm")
48
+ else:
49
+ orig_key = orig_key.replace("norm2", "self_attn_layer_norm")
50
+ if "norm3" in orig_key:
51
+ orig_key = orig_key.replace("norm3", "final_layer_norm")
52
+ if "linear1" in orig_key:
53
+ orig_key = orig_key.replace("linear1", "fc1")
54
+ if "linear2" in orig_key:
55
+ orig_key = orig_key.replace("linear2", "fc2")
56
+ if "query_embed" in orig_key:
57
+ orig_key = orig_key.replace("query_embed", "query_position_embeddings")
58
+ if "cross_attn" in orig_key:
59
+ orig_key = orig_key.replace("cross_attn", "encoder_attn")
60
+
61
+ return orig_key
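
For orientation (not in the source), here is how one original checkpoint key is mapped by `rename_key`; the key name is hypothetical but follows the naming scheme handled above.

```python
# Sketch: the "transformer." prefix is stripped and "cross_attn" becomes "encoder_attn".
print(rename_key("transformer.decoder.layers.0.cross_attn.output_proj.weight"))
# -> "decoder.layers.0.encoder_attn.output_proj.weight"
```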
62
+
63
+
64
+ def read_in_q_k_v(state_dict):
65
+ # transformer decoder self-attention layers
66
+ for i in range(6):
67
+ # read in weights + bias of input projection layer of self-attention
68
+ in_proj_weight = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_weight")
69
+ in_proj_bias = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_bias")
70
+ # next, add query, keys and values (in that order) to the state dict
71
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
72
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
73
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
74
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
75
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
76
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
77
+
78
+
79
+ # We will verify our results on an image of cute cats
80
+ def prepare_img():
81
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
82
+ im = Image.open(requests.get(url, stream=True).raw)
83
+
84
+ return im
85
+
86
+
87
+ @torch.no_grad()
88
+ def convert_deformable_detr_checkpoint(
89
+ checkpoint_path,
90
+ single_scale,
91
+ dilation,
92
+ with_box_refine,
93
+ two_stage,
94
+ pytorch_dump_folder_path,
95
+ push_to_hub,
96
+ ):
97
+ """
98
+ Copy/paste/tweak model's weights to our Deformable DETR structure.
99
+ """
100
+
101
+ # load default config
102
+ config = DeformableDetrConfig()
103
+ # set config attributes
104
+ if single_scale:
105
+ config.num_feature_levels = 1
106
+ config.dilation = dilation
107
+ config.with_box_refine = with_box_refine
108
+ config.two_stage = two_stage
109
+ # set labels
110
+ config.num_labels = 91
111
+ repo_id = "huggingface/label-files"
112
+ filename = "coco-detection-id2label.json"
113
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
114
+ id2label = {int(k): v for k, v in id2label.items()}
115
+ config.id2label = id2label
116
+ config.label2id = {v: k for k, v in id2label.items()}
117
+
118
+ # load image processor
119
+ image_processor = DeformableDetrImageProcessor(format="coco_detection")
120
+
121
+ # prepare image
122
+ img = prepare_img()
123
+ encoding = image_processor(images=img, return_tensors="pt")
124
+ pixel_values = encoding["pixel_values"]
125
+
126
+ logger.info("Converting model...")
127
+
128
+ # load original state dict
129
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
130
+ # rename keys
131
+ for key in state_dict.copy().keys():
132
+ val = state_dict.pop(key)
133
+ state_dict[rename_key(key)] = val
134
+ # query, key and value matrices need special treatment
135
+ read_in_q_k_v(state_dict)
136
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
137
+ prefix = "model."
138
+ for key in state_dict.copy().keys():
139
+ if not key.startswith("class_embed") and not key.startswith("bbox_embed"):
140
+ val = state_dict.pop(key)
141
+ state_dict[prefix + key] = val
142
+ # finally, create HuggingFace model and load state dict
143
+ model = DeformableDetrForObjectDetection(config)
144
+ model.load_state_dict(state_dict)
145
+ model.eval()
146
+
147
+ device = "cuda" if torch.cuda.is_available() else "cpu"
148
+ model.to(device)
149
+ # verify our conversion
150
+ outputs = model(pixel_values.to(device))
151
+
152
+ expected_logits = torch.tensor(
153
+ [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]]
154
+ )
155
+ expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]])
156
+
157
+ if single_scale:
158
+ expected_logits = torch.tensor(
159
+ [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]]
160
+ )
161
+ expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]])
162
+
163
+ if single_scale and dilation:
164
+ expected_logits = torch.tensor(
165
+ [[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]]
166
+ )
167
+ expected_boxes = torch.tensor([[0.7665, 0.4130, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]])
168
+
169
+ if with_box_refine:
170
+ expected_logits = torch.tensor(
171
+ [[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]]
172
+ )
173
+ expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]])
174
+
175
+ if with_box_refine and two_stage:
176
+ expected_logits = torch.tensor(
177
+ [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]]
178
+ )
179
+ expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]])
180
+
181
+ print("Logits:", outputs.logits[0, :3, :3])
182
+
183
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
184
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
185
+
186
+ print("Everything ok!")
187
+
188
+ # Save model and image processor
189
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
190
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
191
+ model.save_pretrained(pytorch_dump_folder_path)
192
+ image_processor.save_pretrained(pytorch_dump_folder_path)
193
+
194
+ # Push to hub
195
+ if push_to_hub:
196
+ model_name = "deformable-detr"
197
+ model_name += "-single-scale" if single_scale else ""
198
+ model_name += "-dc5" if dilation else ""
199
+ model_name += "-with-box-refine" if with_box_refine else ""
200
+ model_name += "-two-stage" if two_stage else ""
201
+ print("Pushing model to hub...")
202
+ model.push_to_hub(repo_path_or_name=model_name, organization="nielsr", commit_message="Add model")
203
+
204
+
205
+ if __name__ == "__main__":
206
+ parser = argparse.ArgumentParser()
207
+
208
+ parser.add_argument(
209
+ "--checkpoint_path",
210
+ type=str,
211
+ default="/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth",
212
+ help="Path to Pytorch checkpoint (.pth file) you'd like to convert.",
213
+ )
214
+ parser.add_argument("--single_scale", action="store_true", help="Whether to set config.num_feature_levels = 1.")
215
+ parser.add_argument("--dilation", action="store_true", help="Whether to set config.dilation=True.")
216
+ parser.add_argument("--with_box_refine", action="store_true", help="Whether to set config.with_box_refine=True.")
217
+ parser.add_argument("--two_stage", action="store_true", help="Whether to set config.two_stage=True.")
218
+ parser.add_argument(
219
+ "--pytorch_dump_folder_path",
220
+ default=None,
221
+ type=str,
222
+ required=True,
223
+ help="Path to the folder to output PyTorch model.",
224
+ )
225
+ parser.add_argument(
226
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
227
+ )
228
+ args = parser.parse_args()
229
+ convert_deformable_detr_checkpoint(
230
+ args.checkpoint_path,
231
+ args.single_scale,
232
+ args.dilation,
233
+ args.with_box_refine,
234
+ args.two_stage,
235
+ args.pytorch_dump_folder_path,
236
+ args.push_to_hub,
237
+ )
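
The conversion can also be driven from Python instead of the CLI above. A sketch follows; the paths are hypothetical and the original SenseTime checkpoint must be downloaded separately.

```python
from transformers.models.deformable_detr.convert_deformable_detr_to_pytorch import (
    convert_deformable_detr_checkpoint,
)

# Convert the plain ResNet-50 variant; arguments mirror the argparse options above.
convert_deformable_detr_checkpoint(
    checkpoint_path="r50_deformable_detr-checkpoint.pth",    # hypothetical local path
    single_scale=False,
    dilation=False,
    with_box_refine=False,
    two_stage=False,
    pytorch_dump_folder_path="./deformable-detr-converted",  # hypothetical output dir
    push_to_hub=False,
)
```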
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py ADDED
@@ -0,0 +1,43 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for Deformable DETR."""
16
+
17
+ import warnings
18
+
19
+ from ...image_transforms import rgb_to_id as _rgb_to_id
20
+ from ...utils import logging
21
+ from .image_processing_deformable_detr import DeformableDetrImageProcessor
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ def rgb_to_id(x):
28
+ warnings.warn(
29
+ "rgb_to_id has moved and will not be importable from this module from v5. "
30
+ "Please import from transformers.image_transforms instead.",
31
+ FutureWarning,
32
+ )
33
+ return _rgb_to_id(x)
34
+
35
+
36
+ class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
37
+ def __init__(self, *args, **kwargs) -> None:
38
+ warnings.warn(
39
+ "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
40
+ " Please use DeformableDetrImageProcessor instead.",
41
+ FutureWarning,
42
+ )
43
+ super().__init__(*args, **kwargs)
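
A small sketch (not part of the diff) of what this shim does in practice: the deprecated class is a thin alias that emits a `FutureWarning` and otherwise behaves like `DeformableDetrImageProcessor`.

```python
import warnings

from transformers import DeformableDetrFeatureExtractor, DeformableDetrImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = DeformableDetrFeatureExtractor(format="coco_detection")

assert isinstance(extractor, DeformableDetrImageProcessor)
assert any(issubclass(w.category, FutureWarning) for w in caught)
```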
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py ADDED
@@ -0,0 +1,1553 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Deformable DETR."""
16
+
17
+ import io
18
+ import pathlib
19
+ from collections import defaultdict
20
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...feature_extraction_utils import BatchFeature
25
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
26
+ from ...image_transforms import (
27
+ PaddingMode,
28
+ center_to_corners_format,
29
+ corners_to_center_format,
30
+ id_to_rgb,
31
+ pad,
32
+ rescale,
33
+ resize,
34
+ rgb_to_id,
35
+ to_channel_dimension_format,
36
+ )
37
+ from ...image_utils import (
38
+ IMAGENET_DEFAULT_MEAN,
39
+ IMAGENET_DEFAULT_STD,
40
+ AnnotationFormat,
41
+ AnnotationType,
42
+ ChannelDimension,
43
+ ImageInput,
44
+ PILImageResampling,
45
+ get_image_size,
46
+ infer_channel_dimension_format,
47
+ is_scaled_image,
48
+ make_list_of_images,
49
+ to_numpy_array,
50
+ valid_images,
51
+ validate_annotations,
52
+ validate_kwargs,
53
+ validate_preprocess_arguments,
54
+ )
55
+ from ...utils import (
56
+ TensorType,
57
+ is_flax_available,
58
+ is_jax_tensor,
59
+ is_scipy_available,
60
+ is_tf_available,
61
+ is_tf_tensor,
62
+ is_torch_available,
63
+ is_torch_tensor,
64
+ is_vision_available,
65
+ logging,
66
+ )
67
+
68
+
69
+ if is_torch_available():
70
+ import torch
71
+ from torch import nn
72
+
73
+
74
+ if is_vision_available():
75
+ import PIL
76
+
77
+ if is_scipy_available():
78
+ import scipy.special
79
+ import scipy.stats
80
+
81
+
82
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
83
+
84
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
85
+
86
+
87
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
88
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
89
+ """
90
+ Computes the output image size given the input image size and the desired output size.
91
+
92
+ Args:
93
+ image_size (`Tuple[int, int]`):
94
+ The input image size.
95
+ size (`int`):
96
+ The desired output size.
97
+ max_size (`int`, *optional*):
98
+ The maximum allowed output size.
99
+ """
100
+ height, width = image_size
101
+ if max_size is not None:
102
+ min_original_size = float(min((height, width)))
103
+ max_original_size = float(max((height, width)))
104
+ if max_original_size / min_original_size * size > max_size:
105
+ size = int(round(max_size * min_original_size / max_original_size))
106
+
107
+ if (height <= width and height == size) or (width <= height and width == size):
108
+ return height, width
109
+
110
+ if width < height:
111
+ ow = size
112
+ oh = int(size * height / width)
113
+ else:
114
+ oh = size
115
+ ow = int(size * width / height)
116
+ return (oh, ow)
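
As a sanity check on the resizing rule above (hand-computed, assuming the helper is in scope):

```python
# A 480x640 image with shortest_edge=800 and longest_edge=1333 fits within the cap.
print(get_size_with_aspect_ratio((480, 640), size=800, max_size=1333))   # (800, 1066)
# A 480x1280 image would exceed the cap, so the shortest edge is first reduced to 500.
print(get_size_with_aspect_ratio((480, 1280), size=800, max_size=1333))  # (500, 1333)
```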
117
+
118
+
119
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
120
+ def get_resize_output_image_size(
121
+ input_image: np.ndarray,
122
+ size: Union[int, Tuple[int, int], List[int]],
123
+ max_size: Optional[int] = None,
124
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
125
+ ) -> Tuple[int, int]:
126
+ """
127
+ Computes the output image size given the input image size and the desired output size. If the desired output size
128
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
129
+ image size is computed by keeping the aspect ratio of the input image size.
130
+
131
+ Args:
132
+ input_image (`np.ndarray`):
133
+ The image to resize.
134
+ size (`int` or `Tuple[int, int]` or `List[int]`):
135
+ The desired output size.
136
+ max_size (`int`, *optional*):
137
+ The maximum allowed output size.
138
+ input_data_format (`ChannelDimension` or `str`, *optional*):
139
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
140
+ """
141
+ image_size = get_image_size(input_image, input_data_format)
142
+ if isinstance(size, (list, tuple)):
143
+ return size
144
+
145
+ return get_size_with_aspect_ratio(image_size, size, max_size)
146
+
147
+
148
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
149
+ def get_numpy_to_framework_fn(arr) -> Callable:
150
+ """
151
+ Returns a function that converts a numpy array to the framework of the input array.
152
+
153
+ Args:
154
+ arr (`np.ndarray`): The array to convert.
155
+ """
156
+ if isinstance(arr, np.ndarray):
157
+ return np.array
158
+ if is_tf_available() and is_tf_tensor(arr):
159
+ import tensorflow as tf
160
+
161
+ return tf.convert_to_tensor
162
+ if is_torch_available() and is_torch_tensor(arr):
163
+ import torch
164
+
165
+ return torch.tensor
166
+ if is_flax_available() and is_jax_tensor(arr):
167
+ import jax.numpy as jnp
168
+
169
+ return jnp.array
170
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
171
+
172
+
173
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
174
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
175
+ """
176
+ Squeezes an array, but only if the axis specified has dim 1.
177
+ """
178
+ if axis is None:
179
+ return arr.squeeze()
180
+
181
+ try:
182
+ return arr.squeeze(axis=axis)
183
+ except ValueError:
184
+ return arr
185
+
186
+
187
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
188
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
189
+ image_height, image_width = image_size
190
+ norm_annotation = {}
191
+ for key, value in annotation.items():
192
+ if key == "boxes":
193
+ boxes = value
194
+ boxes = corners_to_center_format(boxes)
195
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
196
+ norm_annotation[key] = boxes
197
+ else:
198
+ norm_annotation[key] = value
199
+ return norm_annotation
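
A worked example (not in the source) of the box normalization performed above, for a 480x640 image:

```python
import numpy as np

# Absolute (x_min, y_min, x_max, y_max) corners become normalized (cx, cy, w, h).
annotation = {"boxes": np.array([[10.0, 20.0, 110.0, 70.0]], dtype=np.float32)}
print(normalize_annotation(annotation, image_size=(480, 640))["boxes"])
# approximately [[0.0938, 0.0938, 0.1563, 0.1042]]
```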
200
+
201
+
202
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
203
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
204
+ """
205
+ Return the maximum value across all indices of an iterable of values.
206
+ """
207
+ return [max(values_i) for values_i in zip(*values)]
208
+
209
+
210
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
211
+ def get_max_height_width(
212
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
213
+ ) -> List[int]:
214
+ """
215
+ Get the maximum height and width across all images in a batch.
216
+ """
217
+ if input_data_format is None:
218
+ input_data_format = infer_channel_dimension_format(images[0])
219
+
220
+ if input_data_format == ChannelDimension.FIRST:
221
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
222
+ elif input_data_format == ChannelDimension.LAST:
223
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
224
+ else:
225
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
226
+ return (max_height, max_width)
227
+
228
+
229
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
230
+ def make_pixel_mask(
231
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
232
+ ) -> np.ndarray:
233
+ """
234
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
235
+
236
+ Args:
237
+ image (`np.ndarray`):
238
+ Image to make the pixel mask for.
239
+ output_size (`Tuple[int, int]`):
240
+ Output size of the mask.
241
+ """
242
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
243
+ mask = np.zeros(output_size, dtype=np.int64)
244
+ mask[:input_height, :input_width] = 1
245
+ return mask
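
A small sketch of the mask produced above for a 2x4 channels-first image padded into a 4x5 canvas (values hand-checked):

```python
import numpy as np

image = np.zeros((3, 2, 4))  # channels-first, height 2, width 4
print(make_pixel_mask(image, output_size=(4, 5)))
# [[1 1 1 1 0]
#  [1 1 1 1 0]
#  [0 0 0 0 0]
#  [0 0 0 0 0]]
```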
246
+
247
+
248
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
249
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
250
+ """
251
+ Convert a COCO polygon annotation to a mask.
252
+
253
+ Args:
254
+ segmentations (`List[List[float]]`):
255
+ List of polygons, each polygon represented by a list of x-y coordinates.
256
+ height (`int`):
257
+ Height of the mask.
258
+ width (`int`):
259
+ Width of the mask.
260
+ """
261
+ try:
262
+ from pycocotools import mask as coco_mask
263
+ except ImportError:
264
+ raise ImportError("Pycocotools is not installed in your environment.")
265
+
266
+ masks = []
267
+ for polygons in segmentations:
268
+ rles = coco_mask.frPyObjects(polygons, height, width)
269
+ mask = coco_mask.decode(rles)
270
+ if len(mask.shape) < 3:
271
+ mask = mask[..., None]
272
+ mask = np.asarray(mask, dtype=np.uint8)
273
+ mask = np.any(mask, axis=2)
274
+ masks.append(mask)
275
+ if masks:
276
+ masks = np.stack(masks, axis=0)
277
+ else:
278
+ masks = np.zeros((0, height, width), dtype=np.uint8)
279
+
280
+ return masks
281
+
282
+
283
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr
284
+ def prepare_coco_detection_annotation(
285
+ image,
286
+ target,
287
+ return_segmentation_masks: bool = False,
288
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
289
+ ):
290
+ """
291
+ Convert the target in COCO format into the format expected by DeformableDetr.
292
+ """
293
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
294
+
295
+ image_id = target["image_id"]
296
+ image_id = np.asarray([image_id], dtype=np.int64)
297
+
298
+ # Get all COCO annotations for the given image.
299
+ annotations = target["annotations"]
300
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
301
+
302
+ classes = [obj["category_id"] for obj in annotations]
303
+ classes = np.asarray(classes, dtype=np.int64)
304
+
305
+ # for conversion to coco api
306
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
307
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
308
+
309
+ boxes = [obj["bbox"] for obj in annotations]
310
+ # guard against no boxes via resizing
311
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
312
+ boxes[:, 2:] += boxes[:, :2]
313
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
314
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
315
+
316
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
317
+
318
+ new_target = {}
319
+ new_target["image_id"] = image_id
320
+ new_target["class_labels"] = classes[keep]
321
+ new_target["boxes"] = boxes[keep]
322
+ new_target["area"] = area[keep]
323
+ new_target["iscrowd"] = iscrowd[keep]
324
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
325
+
326
+ if annotations and "keypoints" in annotations[0]:
327
+ keypoints = [obj["keypoints"] for obj in annotations]
328
+ # Converting the filtered keypoints list to a numpy array
329
+ keypoints = np.asarray(keypoints, dtype=np.float32)
330
+ # Apply the keep mask here to filter the relevant annotations
331
+ keypoints = keypoints[keep]
332
+ num_keypoints = keypoints.shape[0]
333
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
334
+ new_target["keypoints"] = keypoints
335
+
336
+ if return_segmentation_masks:
337
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
338
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
339
+ new_target["masks"] = masks[keep]
340
+
341
+ return new_target
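
For orientation (not part of the source), the target dict the function above expects follows the COCO detection schema; the values below are made up:

```python
import numpy as np

image = np.zeros((3, 480, 640), dtype=np.uint8)  # channels-first dummy image
target = {
    "image_id": 39769,
    "annotations": [
        {"bbox": [10.0, 20.0, 100.0, 50.0], "category_id": 17, "area": 5000.0, "iscrowd": 0},
    ],
}
new_target = prepare_coco_detection_annotation(image, target)
print(new_target["boxes"])         # [[ 10.  20. 110.  70.]] -> xyxy corners, clipped to the image
print(new_target["class_labels"])  # [17]
```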
342
+
343
+
344
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
345
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
346
+ """
347
+ Compute the bounding boxes around the provided panoptic segmentation masks.
348
+
349
+ Args:
350
+ masks: masks in format `[number_masks, height, width]` where N is the number of masks
351
+
352
+ Returns:
353
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
354
+ """
355
+ if masks.size == 0:
356
+ return np.zeros((0, 4))
357
+
358
+ h, w = masks.shape[-2:]
359
+ y = np.arange(0, h, dtype=np.float32)
360
+ x = np.arange(0, w, dtype=np.float32)
361
+ # see https://github.com/pytorch/pytorch/issues/50276
362
+ y, x = np.meshgrid(y, x, indexing="ij")
363
+
364
+ x_mask = masks * np.expand_dims(x, axis=0)
365
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
366
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
367
+ x_min = x.filled(fill_value=1e8)
368
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
369
+
370
+ y_mask = masks * np.expand_dims(y, axis=0)
371
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
372
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
373
+ y_min = y.filled(fill_value=1e8)
374
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
375
+
376
+ return np.stack([x_min, y_min, x_max, y_max], 1)
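
A hand-checked example of the box extraction above, with a single mask covering rows 1-2 and columns 2-4 of a 5x6 grid:

```python
import numpy as np

mask = np.zeros((1, 5, 6), dtype=np.uint8)
mask[0, 1:3, 2:5] = 1
print(masks_to_boxes(mask))  # [[2. 1. 4. 2.]] -> (x_min, y_min, x_max, y_max)
```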
377
+
378
+
379
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr
380
+ def prepare_coco_panoptic_annotation(
381
+ image: np.ndarray,
382
+ target: Dict,
383
+ masks_path: Union[str, pathlib.Path],
384
+ return_masks: bool = True,
385
+ input_data_format: Union[ChannelDimension, str] = None,
386
+ ) -> Dict:
387
+ """
388
+ Prepare a coco panoptic annotation for DeformableDetr.
389
+ """
390
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
391
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
392
+
393
+ new_target = {}
394
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
395
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
396
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
397
+
398
+ if "segments_info" in target:
399
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
400
+ masks = rgb_to_id(masks)
401
+
402
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
403
+ masks = masks == ids[:, None, None]
404
+ masks = masks.astype(np.uint8)
405
+ if return_masks:
406
+ new_target["masks"] = masks
407
+ new_target["boxes"] = masks_to_boxes(masks)
408
+ new_target["class_labels"] = np.array(
409
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
410
+ )
411
+ new_target["iscrowd"] = np.asarray(
412
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
413
+ )
414
+ new_target["area"] = np.asarray(
415
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
416
+ )
417
+
418
+ return new_target
419
+
420
+
421
+ # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
422
+ def get_segmentation_image(
423
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
424
+ ):
425
+ h, w = input_size
426
+ final_h, final_w = target_size
427
+
428
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
429
+
430
+ if m_id.shape[-1] == 0:
431
+ # We didn't detect any mask :(
432
+ m_id = np.zeros((h, w), dtype=np.int64)
433
+ else:
434
+ m_id = m_id.argmax(-1).reshape(h, w)
435
+
436
+ if deduplicate:
437
+ # Merge the masks corresponding to the same stuff class
438
+ for equiv in stuff_equiv_classes.values():
439
+ for eq_id in equiv:
440
+ m_id[m_id == eq_id] = equiv[0]
441
+
442
+ seg_img = id_to_rgb(m_id)
443
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
444
+ return seg_img
445
+
446
+
447
+ # Copied from transformers.models.detr.image_processing_detr.get_mask_area
448
+ def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
449
+ final_h, final_w = target_size
450
+ np_seg_img = seg_img.astype(np.uint8)
451
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
452
+ m_id = rgb_to_id(np_seg_img)
453
+ area = [(m_id == i).sum() for i in range(n_classes)]
454
+ return area
455
+
456
+
457
+ # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
458
+ def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
459
+ probs = scipy.special.softmax(logits, axis=-1)
460
+ labels = probs.argmax(-1, keepdims=True)
461
+ scores = np.take_along_axis(probs, labels, axis=-1)
462
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
463
+ return scores, labels
464
+
465
+
466
+ # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample
467
+ def post_process_panoptic_sample(
468
+ out_logits: np.ndarray,
469
+ masks: np.ndarray,
470
+ boxes: np.ndarray,
471
+ processed_size: Tuple[int, int],
472
+ target_size: Tuple[int, int],
473
+ is_thing_map: Dict,
474
+ threshold=0.85,
475
+ ) -> Dict:
476
+ """
477
+ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.
478
+
479
+ Args:
480
+ out_logits (`torch.Tensor`):
481
+ The logits for this sample.
482
+ masks (`torch.Tensor`):
483
+ The predicted segmentation masks for this sample.
484
+ boxes (`torch.Tensor`):
485
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
486
+ width, height)` and values between `[0, 1]`, relative to the size of the image (disregarding padding).
487
+ processed_size (`Tuple[int, int]`):
488
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
489
+ after data augmentation but before batching.
490
+ target_size (`Tuple[int, int]`):
491
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
492
+ prediction.
493
+ is_thing_map (`Dict`):
494
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
495
+ threshold (`float`, *optional*, defaults to 0.85):
496
+ The threshold used to binarize the segmentation masks.
497
+ """
498
+ # we filter empty queries and detection below threshold
499
+ scores, labels = score_labels_from_class_probabilities(out_logits)
500
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
501
+
502
+ cur_scores = scores[keep]
503
+ cur_classes = labels[keep]
504
+ cur_boxes = center_to_corners_format(boxes[keep])
505
+
506
+ if len(cur_boxes) != len(cur_classes):
507
+ raise ValueError("Not as many boxes as there are classes")
508
+
509
+ cur_masks = masks[keep]
510
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
511
+ cur_masks = safe_squeeze(cur_masks, 1)
512
+ b, h, w = cur_masks.shape
513
+
514
+ # It may be that we have several predicted masks for the same stuff class.
515
+ # In the following, we track the list of mask ids for each stuff class (they are merged later on)
516
+ cur_masks = cur_masks.reshape(b, -1)
517
+ stuff_equiv_classes = defaultdict(list)
518
+ for k, label in enumerate(cur_classes):
519
+ if not is_thing_map[label]:
520
+ stuff_equiv_classes[label].append(k)
521
+
522
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
523
+ area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
524
+
525
+ # We filter out any mask that is too small
526
+ if cur_classes.size > 0:
527
+ # We now filter empty masks as long as we find some
528
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
529
+ while filtered_small.any():
530
+ cur_masks = cur_masks[~filtered_small]
531
+ cur_scores = cur_scores[~filtered_small]
532
+ cur_classes = cur_classes[~filtered_small]
533
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
534
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
535
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
536
+ else:
537
+ cur_classes = np.ones((1, 1), dtype=np.int64)
538
+
539
+ segments_info = [
540
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
541
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
542
+ ]
543
+ del cur_classes
544
+
545
+ with io.BytesIO() as out:
546
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
547
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
548
+
549
+ return predictions
550
+
551
+
552
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
553
+ def resize_annotation(
554
+ annotation: Dict[str, Any],
555
+ orig_size: Tuple[int, int],
556
+ target_size: Tuple[int, int],
557
+ threshold: float = 0.5,
558
+ resample: PILImageResampling = PILImageResampling.NEAREST,
559
+ ):
560
+ """
561
+ Resizes an annotation to a target size.
562
+
563
+ Args:
564
+ annotation (`Dict[str, Any]`):
565
+ The annotation dictionary.
566
+ orig_size (`Tuple[int, int]`):
567
+ The original size of the input image.
568
+ target_size (`Tuple[int, int]`):
569
+ The target size of the image, as returned by the preprocessing `resize` step.
570
+ threshold (`float`, *optional*, defaults to 0.5):
571
+ The threshold used to binarize the segmentation masks.
572
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
573
+ The resampling filter to use when resizing the masks.
574
+ """
575
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
576
+ ratio_height, ratio_width = ratios
577
+
578
+ new_annotation = {}
579
+ new_annotation["size"] = target_size
580
+
581
+ for key, value in annotation.items():
582
+ if key == "boxes":
583
+ boxes = value
584
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
585
+ new_annotation["boxes"] = scaled_boxes
586
+ elif key == "area":
587
+ area = value
588
+ scaled_area = area * (ratio_width * ratio_height)
589
+ new_annotation["area"] = scaled_area
590
+ elif key == "masks":
591
+ masks = value[:, None]
592
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
593
+ masks = masks.astype(np.float32)
594
+ masks = masks[:, 0] > threshold
595
+ new_annotation["masks"] = masks
596
+ elif key == "size":
597
+ new_annotation["size"] = target_size
598
+ else:
599
+ new_annotation[key] = value
600
+
601
+ return new_annotation
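
A sketch of the scaling above: doubling both image dimensions doubles the box coordinates and quadruples the areas (values are made up):

```python
import numpy as np

annotation = {"boxes": np.array([[10.0, 20.0, 110.0, 70.0]]), "area": np.array([5000.0])}
resized = resize_annotation(annotation, orig_size=(480, 640), target_size=(960, 1280))
print(resized["boxes"])  # [[ 20.  40. 220. 140.]]
print(resized["area"])   # [20000.]
```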
602
+
603
+
604
+ # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
605
+ def binary_mask_to_rle(mask):
606
+ """
607
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
608
+
609
+ Args:
610
+ mask (`torch.Tensor` or `numpy.array`):
611
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
612
+ segment_id or class_id.
613
+ Returns:
614
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
615
+ format.
616
+ """
617
+ if is_torch_tensor(mask):
618
+ mask = mask.numpy()
619
+
620
+ pixels = mask.flatten()
621
+ pixels = np.concatenate([[0], pixels, [0]])
622
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
623
+ runs[1::2] -= runs[::2]
624
+ return list(runs)
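
A tiny hand-checked example of the RLE produced above; starts are 1-indexed over the flattened mask and alternate with run lengths:

```python
import numpy as np

mask = np.array([[0, 0, 1, 1, 0, 1]])
print(binary_mask_to_rle(mask))  # [3, 2, 6, 1] -> ones at positions 3-4 and at position 6
```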
625
+
626
+
627
+ # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
628
+ def convert_segmentation_to_rle(segmentation):
629
+ """
630
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
631
+
632
+ Args:
633
+ segmentation (`torch.Tensor` or `numpy.array`):
634
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
635
+ Returns:
636
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
637
+ """
638
+ segment_ids = torch.unique(segmentation)
639
+
640
+ run_length_encodings = []
641
+ for idx in segment_ids:
642
+ mask = torch.where(segmentation == idx, 1, 0)
643
+ rle = binary_mask_to_rle(mask)
644
+ run_length_encodings.append(rle)
645
+
646
+ return run_length_encodings
647
+
648
+
649
+ # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
650
+ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
651
+ """
652
+ Binarize the given masks using `object_mask_threshold`; it returns the associated values of `masks`, `scores` and
653
+ `labels`.
654
+
655
+ Args:
656
+ masks (`torch.Tensor`):
657
+ A tensor of shape `(num_queries, height, width)`.
658
+ scores (`torch.Tensor`):
659
+ A tensor of shape `(num_queries)`.
660
+ labels (`torch.Tensor`):
661
+ A tensor of shape `(num_queries)`.
662
+ object_mask_threshold (`float`):
663
+ A number between 0 and 1 used to binarize the masks.
664
+ Raises:
665
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
666
+ Returns:
667
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region
668
+ < `object_mask_threshold`.
669
+ """
670
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
671
+ raise ValueError("mask, scores and labels must have the same shape!")
672
+
673
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
674
+
675
+ return masks[to_keep], scores[to_keep], labels[to_keep]
676
+
677
+
678
+ # Copied from transformers.models.detr.image_processing_detr.check_segment_validity
679
+ def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
680
+ # Get the mask associated with the k class
681
+ mask_k = mask_labels == k
682
+ mask_k_area = mask_k.sum()
683
+
684
+ # Compute the area of all the stuff in query k
685
+ original_area = (mask_probs[k] >= mask_threshold).sum()
686
+ mask_exists = mask_k_area > 0 and original_area > 0
687
+
688
+ # Eliminate disconnected tiny segments
689
+ if mask_exists:
690
+ area_ratio = mask_k_area / original_area
691
+ if not area_ratio.item() > overlap_mask_area_threshold:
692
+ mask_exists = False
693
+
694
+ return mask_exists, mask_k
695
+
696
+
697
+ # Copied from transformers.models.detr.image_processing_detr.compute_segments
698
+ def compute_segments(
699
+ mask_probs,
700
+ pred_scores,
701
+ pred_labels,
702
+ mask_threshold: float = 0.5,
703
+ overlap_mask_area_threshold: float = 0.8,
704
+ label_ids_to_fuse: Optional[Set[int]] = None,
705
+ target_size: Tuple[int, int] = None,
706
+ ):
707
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
708
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
709
+
710
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
711
+ segments: List[Dict] = []
712
+
713
+ if target_size is not None:
714
+ mask_probs = nn.functional.interpolate(
715
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
716
+ )[0]
717
+
718
+ current_segment_id = 0
719
+
720
+ # Weigh each mask by its prediction score
721
+ mask_probs *= pred_scores.view(-1, 1, 1)
722
+ mask_labels = mask_probs.argmax(0) # [height, width]
723
+
724
+ # Keep track of instances of each class
725
+ stuff_memory_list: Dict[str, int] = {}
726
+ for k in range(pred_labels.shape[0]):
727
+ pred_class = pred_labels[k].item()
728
+ should_fuse = pred_class in label_ids_to_fuse
729
+
730
+ # Check if the mask exists and is large enough to be a segment
731
+ mask_exists, mask_k = check_segment_validity(
732
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
733
+ )
734
+
735
+ if mask_exists:
736
+ if pred_class in stuff_memory_list:
737
+ current_segment_id = stuff_memory_list[pred_class]
738
+ else:
739
+ current_segment_id += 1
740
+
741
+ # Add current object segment to final segmentation map
742
+ segmentation[mask_k] = current_segment_id
743
+ segment_score = round(pred_scores[k].item(), 6)
744
+ segments.append(
745
+ {
746
+ "id": current_segment_id,
747
+ "label_id": pred_class,
748
+ "was_fused": should_fuse,
749
+ "score": segment_score,
750
+ }
751
+ )
752
+ if should_fuse:
753
+ stuff_memory_list[pred_class] = current_segment_id
754
+
755
+ return segmentation, segments
756
+
757
+
758
+ class DeformableDetrImageProcessor(BaseImageProcessor):
759
+ r"""
760
+ Constructs a Deformable DETR image processor.
761
+
762
+ Args:
763
+ format (`str`, *optional*, defaults to `"coco_detection"`):
764
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
765
+ do_resize (`bool`, *optional*, defaults to `True`):
766
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
767
+ overridden by the `do_resize` parameter in the `preprocess` method.
768
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
769
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
770
+ the `preprocess` method.
771
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
772
+ Resampling filter to use if resizing the image.
773
+ do_rescale (`bool`, *optional*, defaults to `True`):
774
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
775
+ `do_rescale` parameter in the `preprocess` method.
776
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
777
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
778
+ `preprocess` method.
779
+ do_normalize (`bool`, *optional*, defaults to `True`):
780
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
781
+ `preprocess` method.
782
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
783
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
784
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
785
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
786
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
787
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
788
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
789
+ Controls whether to convert the annotations to the format expected by the DETR model. Converts the
790
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
791
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
792
+ do_pad (`bool`, *optional*, defaults to `True`):
793
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
794
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
795
+ Padding will be applied to the bottom and right of the image with zeros.
796
+ """
797
+
798
+ model_input_names = ["pixel_values", "pixel_mask"]
799
+
800
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
801
+ def __init__(
802
+ self,
803
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
804
+ do_resize: bool = True,
805
+ size: Dict[str, int] = None,
806
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
807
+ do_rescale: bool = True,
808
+ rescale_factor: Union[int, float] = 1 / 255,
809
+ do_normalize: bool = True,
810
+ image_mean: Union[float, List[float]] = None,
811
+ image_std: Union[float, List[float]] = None,
812
+ do_convert_annotations: Optional[bool] = None,
813
+ do_pad: bool = True,
814
+ **kwargs,
815
+ ) -> None:
816
+ if "pad_and_return_pixel_mask" in kwargs:
817
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
818
+
819
+ if "max_size" in kwargs:
820
+ logger.warning_once(
821
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
822
+ "Please specify in `size['longest_edge'] instead`.",
823
+ )
824
+ max_size = kwargs.pop("max_size")
825
+ else:
826
+ max_size = None if size is None else 1333
827
+
828
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
829
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
830
+
831
+ # Backwards compatibility
832
+ if do_convert_annotations is None:
833
+ do_convert_annotations = do_normalize
834
+
835
+ super().__init__(**kwargs)
836
+ self.format = format
837
+ self.do_resize = do_resize
838
+ self.size = size
839
+ self.resample = resample
840
+ self.do_rescale = do_rescale
841
+ self.rescale_factor = rescale_factor
842
+ self.do_normalize = do_normalize
843
+ self.do_convert_annotations = do_convert_annotations
844
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
845
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
846
+ self.do_pad = do_pad
847
+ self._valid_processor_keys = [
848
+ "images",
849
+ "annotations",
850
+ "return_segmentation_masks",
851
+ "masks_path",
852
+ "do_resize",
853
+ "size",
854
+ "resample",
855
+ "do_rescale",
856
+ "rescale_factor",
857
+ "do_normalize",
858
+ "do_convert_annotations",
859
+ "image_mean",
860
+ "image_std",
861
+ "do_pad",
862
+ "format",
863
+ "return_tensors",
864
+ "data_format",
865
+ "input_data_format",
866
+ ]
867
+
868
+ @classmethod
869
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr
870
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
871
+ """
872
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
873
+ created using from_dict and kwargs e.g. `DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600,
874
+ max_size=800)`
875
+ """
876
+ image_processor_dict = image_processor_dict.copy()
877
+ if "max_size" in kwargs:
878
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
879
+ if "pad_and_return_pixel_mask" in kwargs:
880
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
881
+ return super().from_dict(image_processor_dict, **kwargs)
882
+
883
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr
884
+ def prepare_annotation(
885
+ self,
886
+ image: np.ndarray,
887
+ target: Dict,
888
+ format: Optional[AnnotationFormat] = None,
889
+ return_segmentation_masks: bool = None,
890
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
891
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
892
+ ) -> Dict:
893
+ """
894
+ Prepare an annotation for feeding into DeformableDetr model.
895
+ """
896
+ format = format if format is not None else self.format
897
+
898
+ if format == AnnotationFormat.COCO_DETECTION:
899
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
900
+ target = prepare_coco_detection_annotation(
901
+ image, target, return_segmentation_masks, input_data_format=input_data_format
902
+ )
903
+ elif format == AnnotationFormat.COCO_PANOPTIC:
904
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
905
+ target = prepare_coco_panoptic_annotation(
906
+ image,
907
+ target,
908
+ masks_path=masks_path,
909
+ return_masks=return_segmentation_masks,
910
+ input_data_format=input_data_format,
911
+ )
912
+ else:
913
+ raise ValueError(f"Format {format} is not supported.")
914
+ return target
915
+
916
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
917
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
918
+ logger.warning_once(
919
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
920
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
921
+ "does not return the image anymore.",
922
+ )
923
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
924
+ return image, target
925
+
926
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
927
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
928
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
929
+ return convert_coco_poly_to_mask(*args, **kwargs)
930
+
931
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
932
+ def prepare_coco_detection(self, *args, **kwargs):
933
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
934
+ return prepare_coco_detection_annotation(*args, **kwargs)
935
+
936
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
937
+ def prepare_coco_panoptic(self, *args, **kwargs):
938
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
939
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
940
+
941
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
942
+ def resize(
943
+ self,
944
+ image: np.ndarray,
945
+ size: Dict[str, int],
946
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
947
+ data_format: Optional[ChannelDimension] = None,
948
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
949
+ **kwargs,
950
+ ) -> np.ndarray:
951
+ """
952
+ Resize the image to the given size. Size can be `min_size` (scalar) or a `(height, width)` tuple. If size is an
953
+ int, the smaller edge of the image will be matched to this number.
954
+
955
+ Args:
956
+ image (`np.ndarray`):
957
+ Image to resize.
958
+ size (`Dict[str, int]`):
959
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
960
+ `height` and `width`.
961
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
962
+ Resampling filter to use if resizing the image.
963
+ data_format (`str` or `ChannelDimension`, *optional*):
964
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
965
+ image is used.
966
+ input_data_format (`ChannelDimension` or `str`, *optional*):
967
+ The channel dimension format of the input image. If not provided, it will be inferred.
968
+ """
969
+ if "max_size" in kwargs:
970
+ logger.warning_once(
971
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
972
+ "Please specify in `size['longest_edge'] instead`.",
973
+ )
974
+ max_size = kwargs.pop("max_size")
975
+ else:
976
+ max_size = None
977
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
978
+ if "shortest_edge" in size and "longest_edge" in size:
979
+ size = get_resize_output_image_size(
980
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
981
+ )
982
+ elif "height" in size and "width" in size:
983
+ size = (size["height"], size["width"])
984
+ else:
985
+ raise ValueError(
986
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
987
+ f" {size.keys()}."
988
+ )
989
+ image = resize(
990
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
991
+ )
992
+ return image
993
+
994
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
995
+ def resize_annotation(
996
+ self,
997
+ annotation,
998
+ orig_size,
999
+ size,
1000
+ resample: PILImageResampling = PILImageResampling.NEAREST,
1001
+ ) -> Dict:
1002
+ """
1003
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
1004
+ to this number.
1005
+ """
1006
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
1007
+
1008
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
1009
+ def rescale(
1010
+ self,
1011
+ image: np.ndarray,
1012
+ rescale_factor: float,
1013
+ data_format: Optional[Union[str, ChannelDimension]] = None,
1014
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1015
+ ) -> np.ndarray:
1016
+ """
1017
+ Rescale the image by the given factor. image = image * rescale_factor.
1018
+
1019
+ Args:
1020
+ image (`np.ndarray`):
1021
+ Image to rescale.
1022
+ rescale_factor (`float`):
1023
+ The value to use for rescaling.
1024
+ data_format (`str` or `ChannelDimension`, *optional*):
1025
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
1026
+ image is used. Can be one of:
1027
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1028
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1029
+ input_data_format (`str` or `ChannelDimension`, *optional*):
1030
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
1031
+ one of:
1032
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1033
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1034
+ """
1035
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
1036
+
1037
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
1038
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
1039
+ """
1040
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
1041
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
1042
+ """
1043
+ return normalize_annotation(annotation, image_size=image_size)
1044
+
1045
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
1046
+ def _update_annotation_for_padded_image(
1047
+ self,
1048
+ annotation: Dict,
1049
+ input_image_size: Tuple[int, int],
1050
+ output_image_size: Tuple[int, int],
1051
+ padding,
1052
+ update_bboxes,
1053
+ ) -> Dict:
1054
+ """
1055
+ Update the annotation for a padded image.
1056
+ """
1057
+ new_annotation = {}
1058
+ new_annotation["size"] = output_image_size
1059
+
1060
+ for key, value in annotation.items():
1061
+ if key == "masks":
1062
+ masks = value
1063
+ masks = pad(
1064
+ masks,
1065
+ padding,
1066
+ mode=PaddingMode.CONSTANT,
1067
+ constant_values=0,
1068
+ input_data_format=ChannelDimension.FIRST,
1069
+ )
1070
+ masks = safe_squeeze(masks, 1)
1071
+ new_annotation["masks"] = masks
1072
+ elif key == "boxes" and update_bboxes:
1073
+ boxes = value
1074
+ boxes *= np.asarray(
1075
+ [
1076
+ input_image_size[1] / output_image_size[1],
1077
+ input_image_size[0] / output_image_size[0],
1078
+ input_image_size[1] / output_image_size[1],
1079
+ input_image_size[0] / output_image_size[0],
1080
+ ]
1081
+ )
1082
+ new_annotation["boxes"] = boxes
1083
+ elif key == "size":
1084
+ new_annotation["size"] = output_image_size
1085
+ else:
1086
+ new_annotation[key] = value
1087
+ return new_annotation
1088
+
1089
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
1090
+ def _pad_image(
1091
+ self,
1092
+ image: np.ndarray,
1093
+ output_size: Tuple[int, int],
1094
+ annotation: Optional[Dict[str, Any]] = None,
1095
+ constant_values: Union[float, Iterable[float]] = 0,
1096
+ data_format: Optional[ChannelDimension] = None,
1097
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1098
+ update_bboxes: bool = True,
1099
+ ) -> np.ndarray:
1100
+ """
1101
+ Pad an image with zeros to the given size.
1102
+ """
1103
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
1104
+ output_height, output_width = output_size
1105
+
1106
+ pad_bottom = output_height - input_height
1107
+ pad_right = output_width - input_width
1108
+ padding = ((0, pad_bottom), (0, pad_right))
1109
+ padded_image = pad(
1110
+ image,
1111
+ padding,
1112
+ mode=PaddingMode.CONSTANT,
1113
+ constant_values=constant_values,
1114
+ data_format=data_format,
1115
+ input_data_format=input_data_format,
1116
+ )
1117
+ if annotation is not None:
1118
+ annotation = self._update_annotation_for_padded_image(
1119
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
1120
+ )
1121
+ return padded_image, annotation
1122
+
1123
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
1124
+ def pad(
1125
+ self,
1126
+ images: List[np.ndarray],
1127
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1128
+ constant_values: Union[float, Iterable[float]] = 0,
1129
+ return_pixel_mask: bool = True,
1130
+ return_tensors: Optional[Union[str, TensorType]] = None,
1131
+ data_format: Optional[ChannelDimension] = None,
1132
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1133
+ update_bboxes: bool = True,
1134
+ ) -> BatchFeature:
1135
+ """
1136
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
1137
+ in the batch and optionally returns their corresponding pixel mask.
1138
+
1139
+ Args:
1140
+ images (List[`np.ndarray`]):
1141
+ Images to pad.
1142
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1143
+ Annotations to transform according to the padding that is applied to the images.
1144
+ constant_values (`float` or `Iterable[float]`, *optional*):
1145
+ The value to use for the padding if `mode` is `"constant"`.
1146
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
1147
+ Whether to return a pixel mask.
1148
+ return_tensors (`str` or `TensorType`, *optional*):
1149
+ The type of tensors to return. Can be one of:
1150
+ - Unset: Return a list of `np.ndarray`.
1151
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
1152
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
1153
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
1154
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
1155
+ data_format (`str` or `ChannelDimension`, *optional*):
1156
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
1157
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1158
+ The channel dimension format of the input image. If not provided, it will be inferred.
1159
+ update_bboxes (`bool`, *optional*, defaults to `True`):
1160
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
1161
+ bounding boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
1162
+ format, the bounding boxes will not be updated.
1163
+ """
1164
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
1165
+
1166
+ annotation_list = annotations if annotations is not None else [None] * len(images)
1167
+ padded_images = []
1168
+ padded_annotations = []
1169
+ for image, annotation in zip(images, annotation_list):
1170
+ padded_image, padded_annotation = self._pad_image(
1171
+ image,
1172
+ pad_size,
1173
+ annotation,
1174
+ constant_values=constant_values,
1175
+ data_format=data_format,
1176
+ input_data_format=input_data_format,
1177
+ update_bboxes=update_bboxes,
1178
+ )
1179
+ padded_images.append(padded_image)
1180
+ padded_annotations.append(padded_annotation)
1181
+
1182
+ data = {"pixel_values": padded_images}
1183
+
1184
+ if return_pixel_mask:
1185
+ masks = [
1186
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
1187
+ for image in images
1188
+ ]
1189
+ data["pixel_mask"] = masks
1190
+
1191
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
1192
+
1193
+ if annotations is not None:
1194
+ encoded_inputs["labels"] = [
1195
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
1196
+ ]
1197
+
1198
+ return encoded_inputs
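As an illustration of the `pad` method above, the following sketch (not part of the diff; shapes and values are made up) pads two differently sized channels-first arrays to the batch maximum and returns the accompanying pixel mask:

import numpy as np
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()
images = [
    np.zeros((3, 480, 640), dtype=np.float32),
    np.zeros((3, 512, 768), dtype=np.float32),
]
batch = processor.pad(images, return_pixel_mask=True, return_tensors="np")
print(batch["pixel_values"].shape)  # (2, 3, 512, 768): both images padded bottom/right with zeros
print(batch["pixel_mask"].shape)    # (2, 512, 768): 1 marks real pixels, 0 marks padding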
1199
+
1200
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
1201
+ def preprocess(
1202
+ self,
1203
+ images: ImageInput,
1204
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1205
+ return_segmentation_masks: bool = None,
1206
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
1207
+ do_resize: Optional[bool] = None,
1208
+ size: Optional[Dict[str, int]] = None,
1209
+ resample=None, # PILImageResampling
1210
+ do_rescale: Optional[bool] = None,
1211
+ rescale_factor: Optional[Union[int, float]] = None,
1212
+ do_normalize: Optional[bool] = None,
1213
+ do_convert_annotations: Optional[bool] = None,
1214
+ image_mean: Optional[Union[float, List[float]]] = None,
1215
+ image_std: Optional[Union[float, List[float]]] = None,
1216
+ do_pad: Optional[bool] = None,
1217
+ format: Optional[Union[str, AnnotationFormat]] = None,
1218
+ return_tensors: Optional[Union[TensorType, str]] = None,
1219
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
1220
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1221
+ **kwargs,
1222
+ ) -> BatchFeature:
1223
+ """
1224
+ Preprocess an image or a batch of images so that it can be used by the model.
1225
+
1226
+ Args:
1227
+ images (`ImageInput`):
1228
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
1229
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
1230
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1231
+ List of annotations associated with the image or batch of images. If annotation is for object
1232
+ detection, the annotations should be a dictionary with the following keys:
1233
+ - "image_id" (`int`): The image id.
1234
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
1235
+ dictionary. An image can have no annotations, in which case the list should be empty.
1236
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
1237
+ - "image_id" (`int`): The image id.
1238
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
1239
+ An image can have no segments, in which case the list should be empty.
1240
+ - "file_name" (`str`): The file name of the image.
1241
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
1242
+ Whether to return segmentation masks.
1243
+ masks_path (`str` or `pathlib.Path`, *optional*):
1244
+ Path to the directory containing the segmentation masks.
1245
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
1246
+ Whether to resize the image.
1247
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
1248
+ Size of the image after resizing.
1249
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
1250
+ Resampling filter to use when resizing the image.
1251
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
1252
+ Whether to rescale the image.
1253
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
1254
+ Rescale factor to use when rescaling the image.
1255
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
1256
+ Whether to normalize the image.
1257
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
1258
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
1259
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
1260
+ and in relative coordinates.
1261
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
1262
+ Mean to use when normalizing the image.
1263
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
1264
+ Standard deviation to use when normalizing the image.
1265
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
1266
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
1267
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
1268
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
1269
+ Format of the annotations.
1270
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
1271
+ Type of tensors to return. If `None`, will return the list of images.
1272
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
1273
+ The channel dimension format for the output image. Can be one of:
1274
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1275
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1276
+ - Unset: Use the channel dimension format of the input image.
1277
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1278
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
1279
+ from the input image. Can be one of:
1280
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1281
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1282
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
1283
+ """
1284
+ if "pad_and_return_pixel_mask" in kwargs:
1285
+ logger.warning_once(
1286
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
1287
+ "use `do_pad` instead."
1288
+ )
1289
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
1290
+
1291
+ max_size = None
1292
+ if "max_size" in kwargs:
1293
+ logger.warning_once(
1294
+ "The `max_size` argument is deprecated and will be removed in a future version, use"
1295
+ " `size['longest_edge']` instead."
1296
+ )
1297
+ size = kwargs.pop("max_size")
1298
+
1299
+ do_resize = self.do_resize if do_resize is None else do_resize
1300
+ size = self.size if size is None else size
1301
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
1302
+ resample = self.resample if resample is None else resample
1303
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
1304
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
1305
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
1306
+ image_mean = self.image_mean if image_mean is None else image_mean
1307
+ image_std = self.image_std if image_std is None else image_std
1308
+ do_convert_annotations = (
1309
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
1310
+ )
1311
+ do_pad = self.do_pad if do_pad is None else do_pad
1312
+ format = self.format if format is None else format
1313
+
1314
+ images = make_list_of_images(images)
1315
+
1316
+ if not valid_images(images):
1317
+ raise ValueError(
1318
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
1319
+ "torch.Tensor, tf.Tensor or jax.ndarray."
1320
+ )
1321
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
1322
+
1323
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
1324
+ validate_preprocess_arguments(
1325
+ do_rescale=do_rescale,
1326
+ rescale_factor=rescale_factor,
1327
+ do_normalize=do_normalize,
1328
+ image_mean=image_mean,
1329
+ image_std=image_std,
1330
+ do_resize=do_resize,
1331
+ size=size,
1332
+ resample=resample,
1333
+ )
1334
+
1335
+ if annotations is not None and isinstance(annotations, dict):
1336
+ annotations = [annotations]
1337
+
1338
+ if annotations is not None and len(images) != len(annotations):
1339
+ raise ValueError(
1340
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
1341
+ )
1342
+
1343
+ format = AnnotationFormat(format)
1344
+ if annotations is not None:
1345
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
1346
+
1347
+ if (
1348
+ masks_path is not None
1349
+ and format == AnnotationFormat.COCO_PANOPTIC
1350
+ and not isinstance(masks_path, (pathlib.Path, str))
1351
+ ):
1352
+ raise ValueError(
1353
+ "The path to the directory containing the mask PNG files should be provided as a"
1354
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
1355
+ )
1356
+
1357
+ # All transformations expect numpy arrays
1358
+ images = [to_numpy_array(image) for image in images]
1359
+
1360
+ if is_scaled_image(images[0]) and do_rescale:
1361
+ logger.warning_once(
1362
+ "It looks like you are trying to rescale already rescaled images. If the input"
1363
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1364
+ )
1365
+
1366
+ if input_data_format is None:
1367
+ # We assume that all images have the same channel dimension format.
1368
+ input_data_format = infer_channel_dimension_format(images[0])
1369
+
1370
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1371
+ if annotations is not None:
1372
+ prepared_images = []
1373
+ prepared_annotations = []
1374
+ for image, target in zip(images, annotations):
1375
+ target = self.prepare_annotation(
1376
+ image,
1377
+ target,
1378
+ format,
1379
+ return_segmentation_masks=return_segmentation_masks,
1380
+ masks_path=masks_path,
1381
+ input_data_format=input_data_format,
1382
+ )
1383
+ prepared_images.append(image)
1384
+ prepared_annotations.append(target)
1385
+ images = prepared_images
1386
+ annotations = prepared_annotations
1387
+ del prepared_images, prepared_annotations
1388
+
1389
+ # transformations
1390
+ if do_resize:
1391
+ if annotations is not None:
1392
+ resized_images, resized_annotations = [], []
1393
+ for image, target in zip(images, annotations):
1394
+ orig_size = get_image_size(image, input_data_format)
1395
+ resized_image = self.resize(
1396
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
1397
+ )
1398
+ resized_annotation = self.resize_annotation(
1399
+ target, orig_size, get_image_size(resized_image, input_data_format)
1400
+ )
1401
+ resized_images.append(resized_image)
1402
+ resized_annotations.append(resized_annotation)
1403
+ images = resized_images
1404
+ annotations = resized_annotations
1405
+ del resized_images, resized_annotations
1406
+ else:
1407
+ images = [
1408
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1409
+ for image in images
1410
+ ]
1411
+
1412
+ if do_rescale:
1413
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1414
+
1415
+ if do_normalize:
1416
+ images = [
1417
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1418
+ ]
1419
+
1420
+ if do_convert_annotations and annotations is not None:
1421
+ annotations = [
1422
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1423
+ for annotation, image in zip(annotations, images)
1424
+ ]
1425
+
1426
+ if do_pad:
1427
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1428
+ encoded_inputs = self.pad(
1429
+ images,
1430
+ annotations=annotations,
1431
+ return_pixel_mask=True,
1432
+ data_format=data_format,
1433
+ input_data_format=input_data_format,
1434
+ update_bboxes=do_convert_annotations,
1435
+ return_tensors=return_tensors,
1436
+ )
1437
+ else:
1438
+ images = [
1439
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1440
+ for image in images
1441
+ ]
1442
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1443
+ if annotations is not None:
1444
+ encoded_inputs["labels"] = [
1445
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1446
+ ]
1447
+
1448
+ return encoded_inputs
1449
+
1450
+ # POSTPROCESSING METHODS - TODO: add support for other frameworks
1451
+ def post_process(self, outputs, target_sizes):
1452
+ """
1453
+ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
1454
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1455
+
1456
+ Args:
1457
+ outputs ([`DeformableDetrObjectDetectionOutput`]):
1458
+ Raw outputs of the model.
1459
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
1460
+ Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
1461
+ original image size (before any data augmentation). For visualization, this should be the image size
1462
+ after data augmentation, but before padding.
1463
+ Returns:
1464
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1465
+ in the batch as predicted by the model.
1466
+ """
1467
+ logger.warning_once(
1468
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
1469
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
1470
+ )
1471
+
1472
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1473
+
1474
+ if len(out_logits) != len(target_sizes):
1475
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
1476
+ if target_sizes.shape[1] != 2:
1477
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
1478
+
1479
+ prob = out_logits.sigmoid()
1480
+ topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
1481
+ scores = topk_values
1482
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
1483
+ labels = topk_indexes % out_logits.shape[2]
1484
+ boxes = center_to_corners_format(out_bbox)
1485
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
1486
+
1487
+ # and from relative [0, 1] to absolute [0, height] coordinates
1488
+ img_h, img_w = target_sizes.unbind(1)
1489
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
1490
+ boxes = boxes * scale_fct[:, None, :]
1491
+
1492
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
1493
+
1494
+ return results
1495
+
1496
+ def post_process_object_detection(
1497
+ self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
1498
+ ):
1499
+ """
1500
+ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
1501
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1502
+
1503
+ Args:
1504
+ outputs ([`DeformableDetrObjectDetectionOutput`]):
1505
+ Raw outputs of the model.
1506
+ threshold (`float`, *optional*):
1507
+ Score threshold to keep object detection predictions.
1508
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1509
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1510
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
1511
+ top_k (`int`, *optional*, defaults to 100):
1512
+ Keep only top k bounding boxes before filtering by thresholding.
1513
+
1514
+ Returns:
1515
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1516
+ in the batch as predicted by the model.
1517
+ """
1518
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1519
+
1520
+ if target_sizes is not None:
1521
+ if len(out_logits) != len(target_sizes):
1522
+ raise ValueError(
1523
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1524
+ )
1525
+
1526
+ prob = out_logits.sigmoid()
1527
+ prob = prob.view(out_logits.shape[0], -1)
1528
+ k_value = min(top_k, prob.size(1))
1529
+ topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
1530
+ scores = topk_values
1531
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
1532
+ labels = topk_indexes % out_logits.shape[2]
1533
+ boxes = center_to_corners_format(out_bbox)
1534
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
1535
+
1536
+ # and from relative [0, 1] to absolute [0, height] coordinates
1537
+ if target_sizes is not None:
1538
+ if isinstance(target_sizes, List):
1539
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1540
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1541
+ else:
1542
+ img_h, img_w = target_sizes.unbind(1)
1543
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1544
+ boxes = boxes * scale_fct[:, None, :]
1545
+
1546
+ results = []
1547
+ for s, l, b in zip(scores, labels, boxes):
1548
+ score = s[s > threshold]
1549
+ label = l[s > threshold]
1550
+ box = b[s > threshold]
1551
+ results.append({"scores": score, "labels": label, "boxes": box})
1552
+
1553
+ return results
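Taken together, `preprocess` (reached via the processor's `__call__`) and `post_process_object_detection` form the usual inference round trip. The sketch below is illustrative only and not part of the diff; the checkpoint name and image path are assumptions:

import torch
from PIL import Image
from transformers import DeformableDetrForObjectDetection, DeformableDetrImageProcessor

image = Image.open("street_scene.jpg")  # hypothetical local image

# "SenseTime/deformable-detr" is an assumed public checkpoint, not referenced in the diff itself
processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

inputs = processor(images=image, return_tensors="pt")  # resize, rescale, normalize, pad
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes expects the (height, width) of the original images
target_sizes = torch.tensor([image.size[::-1]])
result = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])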
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py ADDED
@@ -0,0 +1,49 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Loading of Deformable DETR's CUDA kernels"""
16
+ import os
17
+ from pathlib import Path
18
+
19
+
20
+ def load_cuda_kernels():
21
+ from torch.utils.cpp_extension import load
22
+
23
+ root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
24
+ src_files = [
25
+ root / filename
26
+ for filename in [
27
+ "vision.cpp",
28
+ os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
29
+ os.path.join("cuda", "ms_deform_attn_cuda.cu"),
30
+ ]
31
+ ]
32
+
33
+ load(
34
+ "MultiScaleDeformableAttention",
35
+ src_files,
36
+ with_cuda=True,
37
+ extra_include_paths=[str(root)],
38
+ extra_cflags=["-DWITH_CUDA=1"],
39
+ extra_cuda_cflags=[
40
+ "-DCUDA_HAS_FP16=1",
41
+ "-D__CUDA_NO_HALF_OPERATORS__",
42
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
43
+ "-D__CUDA_NO_HALF2_OPERATORS__",
44
+ ],
45
+ )
46
+
47
+ import MultiScaleDeformableAttention as MSDA
48
+
49
+ return MSDA
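A hedged sketch of how `load_cuda_kernels` might be exercised; the availability guard and the fallback message are illustrative assumptions rather than part of the file:

import torch
from transformers.models.deformable_detr.load_custom import load_cuda_kernels

if torch.cuda.is_available():
    try:
        # JIT-compiles vision.cpp plus the CPU/CUDA kernels on first call
        # (requires nvcc and a working C++ toolchain on the host).
        MSDA = load_cuda_kernels()
        print("MultiScaleDeformableAttention kernels loaded:", MSDA)
    except Exception as exc:
        print(f"Kernel build failed, falling back to the pure-PyTorch attention path: {exc}")
else:
    print("No CUDA device available; skipping custom kernel compilation.")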
venv/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/jamba/__init__.py ADDED
@@ -0,0 +1,58 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_jamba": ["JambaConfig"],
21
+ }
22
+
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_jamba"] = [
31
+ "JambaForCausalLM",
32
+ "JambaForSequenceClassification",
33
+ "JambaModel",
34
+ "JambaPreTrainedModel",
35
+ ]
36
+
37
+
38
+ if TYPE_CHECKING:
39
+ from .configuration_jamba import JambaConfig
40
+
41
+ try:
42
+ if not is_torch_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ from .modeling_jamba import (
48
+ JambaForCausalLM,
49
+ JambaForSequenceClassification,
50
+ JambaModel,
51
+ JambaPreTrainedModel,
52
+ )
53
+
54
+
55
+ else:
56
+ import sys
57
+
58
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
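The `_LazyModule` wiring above defers the heavy submodule imports until an attribute is first accessed. A minimal illustration (assuming a transformers build that ships the Jamba model):

from transformers.models import jamba

config_cls = jamba.JambaConfig   # accessing the attribute triggers the lazy import of configuration_jamba
print(config_cls.model_type)     # prints "jamba"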
venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (843 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc ADDED
Binary file (9.97 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc ADDED
Binary file (51.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py ADDED
@@ -0,0 +1,223 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Jamba model configuration"""
16
+ import math
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class JambaConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`JambaModel`]. It is used to instantiate a
28
+ Jamba model according to the specified arguments, defining the model architecture. Instantiating a configuration
29
+ with the defaults will yield a similar configuration to that of the Jamba-v0.1 model.
30
+
31
+ [ai21labs/Jamba-v0.1](https://huggingface.co/ai21labs/Jamba-v0.1)
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 65536):
39
+ Vocabulary size of the Jamba model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`JambaModel`]
41
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
42
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
43
+ model has a output word embedding layer.
44
+ hidden_size (`int`, *optional*, defaults to 4096):
45
+ Dimension of the hidden representations.
46
+ intermediate_size (`int`, *optional*, defaults to 14336):
47
+ Dimension of the MLP representations.
48
+ num_hidden_layers (`int`, *optional*, defaults to 32):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 32):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ num_key_value_heads (`int`, *optional*, defaults to 8):
53
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
54
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
55
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
56
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
57
+ by meanpooling all the original heads within that group. For more details checkout [this
58
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
60
+ The non-linear activation function (function or string) in the decoder.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
69
+ Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
70
+ integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
71
+ logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
72
+ sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
73
+ significantly.
74
+ output_router_logits (`bool`, *optional*, defaults to `False`):
75
+ Whether or not the router logits should be returned by the model. Enabling this will also
76
+ allow the model to output the auxiliary loss. See [here]() for more details
77
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
78
+ The aux loss factor for the total loss.
79
+ pad_token_id (`int`, *optional*, defaults to 0):
80
+ The id of the padding token.
81
+ bos_token_id (`int`, *optional*, defaults to 1):
82
+ The id of the "beginning-of-sequence" token.
83
+ eos_token_id (`int`, *optional*, defaults to 2):
84
+ The id of the "end-of-sequence" token.
85
+ sliding_window (`int`, *optional*):
86
+ Sliding window attention window size. If not specified, will default to `None`.
87
+ max_position_embeddings (`int`, *optional*, defaults to 262144):
88
+ This value doesn't have any real effect. The maximum sequence length that this model is intended to be
89
+ used with. It can be used with longer sequences, but performance may degrade.
90
+ attention_dropout (`float`, *optional*, defaults to 0.0):
91
+ The dropout ratio for the attention probabilities.
92
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
93
+ The number of experts to root per-token, can be also interpreted as the `top-p` routing
94
+ parameter
95
+ num_experts (`int`, *optional*, defaults to 16):
96
+ Number of experts per Sparse MLP layer.
97
+ expert_layer_period (`int`, *optional*, defaults to 2):
98
+ Once in this many layers, we will have an expert layer
99
+ expert_layer_offset (`int`, *optional*, defaults to 1):
100
+ The first layer index that contains an expert mlp layer
101
+ attn_layer_period (`int`, *optional*, defaults to 8):
102
+ Once in this many layers, we will have a vanilla attention layer
103
+ attn_layer_offset (`int`, *optional*, defaults to 4):
104
+ The first layer index that contains a vanilla attention mlp layer
105
+ use_mamba_kernels (`bool`, *optional*, defaults to `True`):
106
+ Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
107
+ `causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if
108
+ `True` and kernels are not available
109
+ mamba_d_state (`int`, *optional*, defaults to 16):
110
+ The dimension the mamba state space latents
111
+ mamba_d_conv (`int`, *optional*, defaults to 4):
112
+ The size of the mamba convolution kernel
113
+ mamba_expand (`int`, *optional*, defaults to 2):
114
+ Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
115
+ mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
116
+ Rank of the the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
117
+ mamba_conv_bias (`bool`, *optional*, defaults to `True`):
118
+ Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
119
+ mamba_proj_bias (`bool`, *optional*, defaults to `False`):
120
+ Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
121
+
122
+ """
123
+
124
+ model_type = "jamba"
125
+ keys_to_ignore_at_inference = ["past_key_values"]
126
+
127
+ def __init__(
128
+ self,
129
+ vocab_size=65536,
130
+ tie_word_embeddings=False,
131
+ hidden_size=4096,
132
+ intermediate_size=14336,
133
+ num_hidden_layers=32,
134
+ num_attention_heads=32,
135
+ num_key_value_heads=8,
136
+ hidden_act="silu",
137
+ initializer_range=0.02,
138
+ rms_norm_eps=1e-6,
139
+ use_cache=True,
140
+ num_logits_to_keep=1,
141
+ output_router_logits=False,
142
+ router_aux_loss_coef=0.001,
143
+ pad_token_id=0,
144
+ bos_token_id=1,
145
+ eos_token_id=2,
146
+ sliding_window=None,
147
+ max_position_embeddings=262144,
148
+ attention_dropout=0.0,
149
+ num_experts_per_tok=2,
150
+ num_experts=16,
151
+ expert_layer_period=2,
152
+ expert_layer_offset=1,
153
+ attn_layer_period=8,
154
+ attn_layer_offset=4,
155
+ use_mamba_kernels=True,
156
+ mamba_d_state=16,
157
+ mamba_d_conv=4,
158
+ mamba_expand=2,
159
+ mamba_dt_rank="auto",
160
+ mamba_conv_bias=True,
161
+ mamba_proj_bias=False,
162
+ **kwargs,
163
+ ):
164
+ self.vocab_size = vocab_size
165
+ self.tie_word_embeddings = tie_word_embeddings
166
+ self.hidden_size = hidden_size
167
+ self.intermediate_size = intermediate_size
168
+ self.num_hidden_layers = num_hidden_layers
169
+ self.num_attention_heads = num_attention_heads
170
+ self.sliding_window = sliding_window
171
+ self.max_position_embeddings = max_position_embeddings
172
+ self.attention_dropout = attention_dropout
173
+
174
+ # for backward compatibility
175
+ if num_key_value_heads is None:
176
+ num_key_value_heads = num_attention_heads
177
+
178
+ self.num_key_value_heads = num_key_value_heads
179
+ self.hidden_act = hidden_act
180
+ self.initializer_range = initializer_range
181
+ self.rms_norm_eps = rms_norm_eps
182
+
183
+ self.use_cache = use_cache
184
+ self.num_logits_to_keep = num_logits_to_keep
185
+ self.output_router_logits = output_router_logits
186
+ self.router_aux_loss_coef = router_aux_loss_coef
187
+
188
+ self.num_experts_per_tok = num_experts_per_tok
189
+ self.num_experts = num_experts
190
+ self.expert_layer_period = expert_layer_period
191
+ self.expert_layer_offset = expert_layer_offset
192
+ self.attn_layer_period = attn_layer_period
193
+ self.attn_layer_offset = attn_layer_offset
194
+
195
+ self.use_mamba_kernels = use_mamba_kernels
196
+ self.mamba_d_state = mamba_d_state
197
+ self.mamba_d_conv = mamba_d_conv
198
+ self.mamba_expand = mamba_expand
199
+ self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == "auto" else mamba_dt_rank
200
+ self.mamba_conv_bias = mamba_conv_bias
201
+ self.mamba_proj_bias = mamba_proj_bias
202
+
203
+ super().__init__(
204
+ pad_token_id=pad_token_id,
205
+ bos_token_id=bos_token_id,
206
+ eos_token_id=eos_token_id,
207
+ tie_word_embeddings=tie_word_embeddings,
208
+ **kwargs,
209
+ )
210
+
211
+ @property
212
+ def layers_block_type(self):
213
+ return [
214
+ "attention" if i % self.attn_layer_period == self.attn_layer_offset else "mamba"
215
+ for i in range(self.num_hidden_layers)
216
+ ]
217
+
218
+ @property
219
+ def layers_num_experts(self):
220
+ return [
221
+ self.num_experts if i % self.expert_layer_period == self.expert_layer_offset else 1
222
+ for i in range(self.num_hidden_layers)
223
+ ]
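The two properties above encode Jamba's interleaved attention/Mamba and dense/MoE layout. A small sketch using a deliberately tiny, non-default layer count while keeping the other defaults documented above:

from transformers import JambaConfig

config = JambaConfig(num_hidden_layers=8)

print(config.layers_block_type)
# ['mamba', 'mamba', 'mamba', 'mamba', 'attention', 'mamba', 'mamba', 'mamba']
print(config.layers_num_experts)
# [1, 16, 1, 16, 1, 16, 1, 16]
print(config.mamba_dt_rank)  # 256, i.e. math.ceil(4096 / 16), because mamba_dt_rank defaults to "auto"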
venv/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py ADDED
@@ -0,0 +1,1882 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Jamba model."""
21
+ import inspect
22
+ import math
23
+ from typing import Any, Dict, List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.nn.functional as F
27
+ import torch.utils.checkpoint
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import ACT2FN
32
+ from ...cache_utils import DynamicCache # we need __iter__ and __len__ of pkv
33
+ from ...modeling_attn_mask_utils import (
34
+ AttentionMaskConverter,
35
+ )
36
+ from ...modeling_outputs import (
37
+ MoeCausalLMOutputWithPast,
38
+ MoeModelOutputWithPast,
39
+ SequenceClassifierOutputWithPast,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...utils import (
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ is_flash_attn_greater_or_equal_2_10,
46
+ logging,
47
+ replace_return_docstrings,
48
+ )
49
+ from ...utils.import_utils import (
50
+ is_causal_conv1d_available,
51
+ is_flash_attn_2_available,
52
+ is_mamba_ssm_available,
53
+ )
54
+ from .configuration_jamba import JambaConfig
55
+
56
+
57
+ if is_flash_attn_2_available():
58
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
59
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
60
+
61
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
62
+
63
+
64
+ if is_mamba_ssm_available():
65
+ from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn
66
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
67
+ else:
68
+ selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None
69
+
70
+ if is_causal_conv1d_available():
71
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
72
+ else:
73
+ causal_conv1d_update, causal_conv1d_fn = None, None
74
+
75
+ is_fast_path_available = all(
76
+ (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
77
+ )
78
+
79
+
80
+ logger = logging.get_logger(__name__)
81
+
82
+ _CONFIG_FOR_DOC = "JambaConfig"
83
+
84
+
85
+ # Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func with gate->router
86
+ def load_balancing_loss_func(
87
+ router_logits: torch.Tensor,
88
+ num_experts: Optional[int] = None,
89
+ top_k=2,
90
+ attention_mask: Optional[torch.Tensor] = None,
91
+ ) -> float:
92
+ r"""
93
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
94
+
95
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
96
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
97
+ experts is too unbalanced.
98
+
99
+ Args:
100
+ router_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
101
+ Logits from the `router`, should be a tuple of model.config.num_hidden_layers tensors of
102
+ shape [batch_size X sequence_length, num_experts].
103
+ attention_mask (`torch.Tensor`, None):
104
+ The attention_mask used in forward function
105
+ shape [batch_size X sequence_length] if not None.
106
+ num_experts (`int`, *optional*):
107
+ Number of experts
108
+
109
+ Returns:
110
+ The auxiliary loss.
111
+ """
112
+ if router_logits is None or not isinstance(router_logits, tuple):
113
+ return 0
114
+
115
+ if isinstance(router_logits, tuple):
116
+ compute_device = router_logits[0].device
117
+ concatenated_router_logits = torch.cat(
118
+ [layer_router.to(compute_device) for layer_router in router_logits], dim=0
119
+ )
120
+
121
+ routing_weights = torch.nn.functional.softmax(concatenated_router_logits, dim=-1)
122
+
123
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
124
+
125
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
126
+
127
+ if attention_mask is None:
128
+ # Compute the percentage of tokens routed to each experts
129
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
130
+
131
+ # Compute the average probability of routing to these experts
132
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
133
+ else:
134
+ batch_size, sequence_length = attention_mask.shape
135
+ num_hidden_layers = concatenated_router_logits.shape[0] // (batch_size * sequence_length)
136
+
137
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
138
+ expert_attention_mask = (
139
+ attention_mask[None, :, :, None, None]
140
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
141
+ .reshape(-1, top_k, num_experts)
142
+ .to(compute_device)
143
+ )
144
+
145
+ # Compute the percentage of tokens routed to each experts
146
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
147
+ expert_attention_mask, dim=0
148
+ )
149
+
150
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
151
+ router_per_expert_attention_mask = (
152
+ attention_mask[None, :, :, None]
153
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
154
+ .reshape(-1, num_experts)
155
+ .to(compute_device)
156
+ )
157
+
158
+ # Compute the average probability of routing to these experts
159
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
160
+ router_per_expert_attention_mask, dim=0
161
+ )
162
+
163
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
164
+ return overall_loss * num_experts
165
+
166
+
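# Illustrative sketch (not taken from this file) of exercising the auxiliary loss above
# in isolation; the sizes and the `router_aux_loss_coef` name are assumptions:
#
#   num_layers, tokens, n_experts = 4, 32, 16
#   router_logits = tuple(torch.randn(tokens, n_experts) for _ in range(num_layers))
#   aux_loss = load_balancing_loss_func(router_logits, num_experts=n_experts, top_k=2)
#   # during training this term is typically added to the LM loss, scaled by a coefficient:
#   # total_loss = lm_loss + router_aux_loss_coef * aux_loss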
167
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
168
+ def _get_unpad_data(attention_mask):
169
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
170
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
171
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
172
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
173
+ return (
174
+ indices,
175
+ cu_seqlens,
176
+ max_seqlen_in_batch,
177
+ )
178
+
179
+
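# Illustrative sketch of the helper above on a tiny, hypothetical padding mask:
#
#   mask = torch.tensor([[1, 1, 0], [1, 1, 1]], dtype=torch.int32)
#   indices, cu_seqlens, max_seqlen = _get_unpad_data(mask)
#   # indices    -> tensor([0, 1, 3, 4, 5])  positions of non-padding tokens in the flattened mask
#   # cu_seqlens -> tensor([0, 2, 5])        cumulative sequence lengths, one entry per batch row plus a leading 0
#   # max_seqlen -> 3                        longest unpadded sequence in the batch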
180
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Jamba
181
+ class JambaRMSNorm(nn.Module):
182
+ def __init__(self, hidden_size, eps=1e-6):
183
+ """
184
+ JambaRMSNorm is equivalent to T5LayerNorm
185
+ """
186
+ super().__init__()
187
+ self.weight = nn.Parameter(torch.ones(hidden_size))
188
+ self.variance_epsilon = eps
189
+
190
+ def forward(self, hidden_states):
191
+ input_dtype = hidden_states.dtype
192
+ hidden_states = hidden_states.to(torch.float32)
193
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
194
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
195
+ return self.weight * hidden_states.to(input_dtype)
196
+
197
+
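# Per token, the normalization above amounts to
#
#   y = weight * x / sqrt(mean(x ** 2) + eps)
#
# i.e. RMS normalization without mean subtraction, computed in float32 and cast back
# to the input dtype.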
198
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
199
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
200
+ """
201
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
202
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
203
+ """
204
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
205
+ if n_rep == 1:
206
+ return hidden_states
207
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
208
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
209
+
210
+
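# Illustrative sketch of the shape change performed by `repeat_kv` (sizes chosen
# arbitrarily for the example):
#
#   kv = torch.randn(2, 4, 128, 64)   # (batch, num_key_value_heads, seq_len, head_dim)
#   out = repeat_kv(kv, n_rep=8)
#   # out.shape -> torch.Size([2, 32, 128, 64])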
211
+ class HybridMambaAttentionDynamicCache(DynamicCache):
212
+ """
213
+ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
214
+ (which has a constant shape regardless of seq_len).
215
+
216
+ This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
217
+ and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor depends on the layer type:
218
+ For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
219
+ while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
220
+ For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
221
+ while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
222
+ and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
223
+ """
224
+
225
+ def __init__(self, config, batch_size, dtype=torch.float16, device=None):
226
+ self.dtype = dtype
227
+ self.layers_block_type = config.layers_block_type
228
+ self.has_previous_state = False # only used by mamba
229
+ intermediate_size = config.mamba_expand * config.hidden_size
230
+ ssm_state_size = config.mamba_d_state
231
+ conv_kernel_size = config.mamba_d_conv
232
+ self.conv_states = []
233
+ self.ssm_states = []
234
+ for i in range(config.num_hidden_layers):
235
+ if self.layers_block_type[i] == "mamba":
236
+ self.conv_states += [
237
+ torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
238
+ ]
239
+ self.ssm_states += [
240
+ torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype)
241
+ ]
242
+ else:
243
+ self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
244
+ self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
245
+
246
+ self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
247
+ self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
248
+
249
+ def update(
250
+ self,
251
+ key_states: torch.Tensor,
252
+ value_states: torch.Tensor,
253
+ layer_idx: int,
254
+ cache_kwargs: Optional[Dict[str, Any]] = None,
255
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
256
+ # Update the cache
257
+ if self.key_cache[layer_idx].shape[-1] == 0:
258
+ self.key_cache[layer_idx] = key_states
259
+ self.value_cache[layer_idx] = value_states
260
+ else:
261
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
262
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
263
+
264
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
265
+
266
+ def reorder_cache(self, beam_idx: torch.LongTensor):
267
+ """Reorders the cache for beam search, given the selected beam indices."""
268
+ for layer_idx in range(len(self.key_cache)):
269
+ device = self.key_cache[layer_idx].device
270
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
271
+ device = self.value_cache[layer_idx].device
272
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
273
+
274
+ device = self.conv_states[layer_idx].device
275
+ self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
276
+ device = self.ssm_states[layer_idx].device
277
+ self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
278
+
279
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
280
+ raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")
281
+
282
+ @classmethod
283
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
284
+ raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")
285
+
286
+
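# Illustrative sketch (placeholder config/device values) of building this cache for
# generation with the hybrid model:
#
#   config = JambaConfig()
#   cache = HybridMambaAttentionDynamicCache(config, batch_size=2, dtype=torch.bfloat16, device="cpu")
#   # attention layers grow cache.key_cache[idx] / cache.value_cache[idx] through cache.update(...),
#   # while mamba layers overwrite cache.conv_states[idx] and cache.ssm_states[idx] in place.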
287
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Jamba
288
+ class JambaAttention(nn.Module):
289
+ """
290
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
291
+ and "Generating Long Sequences with Sparse Transformers".
292
+ """
293
+
294
+ def __init__(self, config: JambaConfig, layer_idx: Optional[int] = None):
295
+ super().__init__()
296
+ self.config = config
297
+ self.layer_idx = layer_idx
298
+ if layer_idx is None:
299
+ logger.warning_once(
300
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
301
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
302
+ "when creating this class."
303
+ )
304
+
305
+ self.hidden_size = config.hidden_size
306
+ self.num_heads = config.num_attention_heads
307
+ self.head_dim = self.hidden_size // self.num_heads
308
+ self.num_key_value_heads = config.num_key_value_heads
309
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
310
+ self.is_causal = True
311
+ self.attention_dropout = config.attention_dropout
312
+
313
+ if (self.head_dim * self.num_heads) != self.hidden_size:
314
+ raise ValueError(
315
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
316
+ f" and `num_heads`: {self.num_heads})."
317
+ )
318
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
319
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
320
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
321
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
322
+
323
+ def forward(
324
+ self,
325
+ hidden_states: torch.Tensor,
326
+ attention_mask: Optional[torch.Tensor] = None,
327
+ position_ids: Optional[torch.LongTensor] = None,
328
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
329
+ output_attentions: bool = False,
330
+ use_cache: bool = False,
331
+ cache_position: Optional[torch.LongTensor] = None,
332
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
333
+ bsz, q_len, _ = hidden_states.size()
334
+
335
+ query_states = self.q_proj(hidden_states)
336
+ key_states = self.k_proj(hidden_states)
337
+ value_states = self.v_proj(hidden_states)
338
+
339
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
340
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
341
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
342
+
343
+ if past_key_value is not None:
344
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
345
+
346
+ # repeat k/v heads if n_kv_heads < n_heads
347
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
348
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
349
+
350
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
351
+
352
+ if attention_mask is not None: # no matter the length, we just slice it
353
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
354
+ attn_weights = attn_weights + causal_mask
355
+
356
+ # upcast attention to fp32
357
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
358
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
359
+ attn_output = torch.matmul(attn_weights, value_states)
360
+
361
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
362
+ raise ValueError(
363
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
364
+ f" {attn_output.size()}"
365
+ )
366
+
367
+ attn_output = attn_output.transpose(1, 2).contiguous()
368
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
369
+
370
+ attn_output = self.o_proj(attn_output)
371
+
372
+ if not output_attentions:
373
+ attn_weights = None
374
+
375
+ return attn_output, attn_weights, past_key_value
376
+
377
+
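# The eager path above is standard scaled dot-product attention with grouped-query
# key/value heads, roughly:
#
#   scores = (Q @ K.transpose(-2, -1)) / sqrt(head_dim) + causal_mask
#   attn   = dropout(softmax(scores, dim=-1))   # softmax computed in float32
#   out    = o_proj(attn @ V)
#
# where K and V are first expanded from num_key_value_heads to num_heads via `repeat_kv`.
# Note that no rotary position embedding is applied in this attention module.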
378
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Jamba
379
+ class JambaFlashAttention2(JambaAttention):
380
+ """
381
+ Jamba flash attention module. This module inherits from `JambaAttention` as the weights of the module stay
382
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
383
+ flash attention and deal with padding tokens in case the input contains any of them.
384
+ """
385
+
386
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
387
+ def __init__(self, *args, **kwargs):
388
+ super().__init__(*args, **kwargs)
389
+
390
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
391
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
392
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
393
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
394
+
395
+ def forward(
396
+ self,
397
+ hidden_states: torch.Tensor,
398
+ attention_mask: Optional[torch.Tensor] = None,
399
+ position_ids: Optional[torch.LongTensor] = None,
400
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
401
+ output_attentions: bool = False,
402
+ use_cache: bool = False,
403
+ cache_position: Optional[torch.LongTensor] = None,
404
+ **kwargs,
405
+ ):
406
+ bsz, q_len, _ = hidden_states.size()
407
+
408
+ query_states = self.q_proj(hidden_states)
409
+ key_states = self.k_proj(hidden_states)
410
+ value_states = self.v_proj(hidden_states)
411
+
412
+ # Flash attention requires the input to have the shape
413
+ # batch_size x seq_length x head_dim x hidden_dim
414
+ # therefore we just need to keep the original shape
415
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
416
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
417
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
418
+
419
+ kv_seq_len = cache_position[-1]
420
+
421
+ use_sliding_windows = (
422
+ _flash_supports_window_size
423
+ and getattr(self.config, "sliding_window", None) is not None
424
+ and kv_seq_len > self.config.sliding_window
425
+ )
426
+
427
+ if not _flash_supports_window_size:
428
+ logger.warning_once(
429
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
430
+ " make sure to upgrade flash-attn library."
431
+ )
432
+
433
+ if past_key_value is not None:
434
+ # Activate slicing cache only if the config has a `sliding_window` attribute
435
+ cache_has_contents = cache_position[0] > 0
436
+ if (
437
+ getattr(self.config, "sliding_window", None) is not None
438
+ and kv_seq_len > self.config.sliding_window
439
+ and cache_has_contents
440
+ ):
441
+ slicing_tokens = 1 - self.config.sliding_window
442
+
443
+ past_key = past_key_value[self.layer_idx][0]
444
+ past_value = past_key_value[self.layer_idx][1]
445
+
446
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
447
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
448
+
449
+ if past_key.shape[-2] != self.config.sliding_window - 1:
450
+ raise ValueError(
451
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
452
+ f" {past_key.shape}"
453
+ )
454
+
455
+ if attention_mask is not None:
456
+ attention_mask = attention_mask[:, slicing_tokens:]
457
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
458
+
459
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
460
+
461
+ # repeat k/v heads if n_kv_heads < n_heads
462
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
463
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
464
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
465
+
466
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
467
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
468
+ # cast them back to float16 just to be sure everything works as expected.
469
+ input_dtype = query_states.dtype
470
+ if input_dtype == torch.float32:
471
+ if torch.is_autocast_enabled():
472
+ target_dtype = torch.get_autocast_gpu_dtype()
473
+ # Handle the case where the model is quantized
474
+ elif hasattr(self.config, "_pre_quantization_dtype"):
475
+ target_dtype = self.config._pre_quantization_dtype
476
+ else:
477
+ target_dtype = self.q_proj.weight.dtype
478
+
479
+ logger.warning_once(
480
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
481
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
482
+ f" {target_dtype}."
483
+ )
484
+
485
+ query_states = query_states.to(target_dtype)
486
+ key_states = key_states.to(target_dtype)
487
+ value_states = value_states.to(target_dtype)
488
+
489
+ # Reshape to the expected shape for Flash Attention
490
+ query_states = query_states.transpose(1, 2)
491
+ key_states = key_states.transpose(1, 2)
492
+ value_states = value_states.transpose(1, 2)
493
+
494
+ attn_output = self._flash_attention_forward(
495
+ query_states,
496
+ key_states,
497
+ value_states,
498
+ attention_mask,
499
+ q_len,
500
+ dropout=dropout_rate,
501
+ use_sliding_windows=use_sliding_windows,
502
+ )
503
+
504
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
505
+ attn_output = self.o_proj(attn_output)
506
+
507
+ if not output_attentions:
508
+ attn_weights = None
509
+
510
+ return attn_output, attn_weights, past_key_value
511
+
512
+ def _flash_attention_forward(
513
+ self,
514
+ query_states,
515
+ key_states,
516
+ value_states,
517
+ attention_mask,
518
+ query_length,
519
+ dropout=0.0,
520
+ softmax_scale=None,
521
+ use_sliding_windows=False,
522
+ ):
523
+ """
524
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
525
+ first unpads the input, then computes the attention scores and pads the final attention scores.
526
+
527
+ Args:
528
+ query_states (`torch.Tensor`):
529
+ Input query states to be passed to Flash Attention API
530
+ key_states (`torch.Tensor`):
531
+ Input key states to be passed to Flash Attention API
532
+ value_states (`torch.Tensor`):
533
+ Input value states to be passed to Flash Attention API
534
+ attention_mask (`torch.Tensor`):
535
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
536
+ position of padding tokens and 1 for the position of non-padding tokens.
537
+ dropout (`float`, *optional*):
538
+ Attention dropout
539
+ softmax_scale (`float`, *optional*):
540
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
541
+ use_sliding_windows (`bool`, *optional*):
542
+ Whether to activate sliding window attention.
543
+ """
544
+ if not self._flash_attn_uses_top_left_mask:
545
+ causal = self.is_causal
546
+ else:
547
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
548
+ causal = self.is_causal and query_length != 1
549
+
550
+ # Contains at least one padding token in the sequence
551
+ if attention_mask is not None:
552
+ batch_size = query_states.shape[0]
553
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
554
+ query_states, key_states, value_states, attention_mask, query_length
555
+ )
556
+
557
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
558
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
559
+
560
+ if not use_sliding_windows:
561
+ attn_output_unpad = flash_attn_varlen_func(
562
+ query_states,
563
+ key_states,
564
+ value_states,
565
+ cu_seqlens_q=cu_seqlens_q,
566
+ cu_seqlens_k=cu_seqlens_k,
567
+ max_seqlen_q=max_seqlen_in_batch_q,
568
+ max_seqlen_k=max_seqlen_in_batch_k,
569
+ dropout_p=dropout,
570
+ softmax_scale=softmax_scale,
571
+ causal=causal,
572
+ )
573
+ else:
574
+ attn_output_unpad = flash_attn_varlen_func(
575
+ query_states,
576
+ key_states,
577
+ value_states,
578
+ cu_seqlens_q=cu_seqlens_q,
579
+ cu_seqlens_k=cu_seqlens_k,
580
+ max_seqlen_q=max_seqlen_in_batch_q,
581
+ max_seqlen_k=max_seqlen_in_batch_k,
582
+ dropout_p=dropout,
583
+ softmax_scale=softmax_scale,
584
+ causal=causal,
585
+ window_size=(self.config.sliding_window, self.config.sliding_window),
586
+ )
587
+
588
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
589
+ else:
590
+ if not use_sliding_windows:
591
+ attn_output = flash_attn_func(
592
+ query_states,
593
+ key_states,
594
+ value_states,
595
+ dropout,
596
+ softmax_scale=softmax_scale,
597
+ causal=causal,
598
+ )
599
+ else:
600
+ attn_output = flash_attn_func(
601
+ query_states,
602
+ key_states,
603
+ value_states,
604
+ dropout,
605
+ softmax_scale=softmax_scale,
606
+ causal=causal,
607
+ window_size=(self.config.sliding_window, self.config.sliding_window),
608
+ )
609
+
610
+ return attn_output
611
+
612
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralFlashAttention2._upad_input
613
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
614
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
615
+
616
+ # On the first iteration we need to properly re-create the padding mask
617
+ # by slicing it at the proper place
618
+ if kv_seq_len != attention_mask.shape[-1]:
619
+ attention_mask_num_tokens = attention_mask.shape[-1]
620
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
621
+
622
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
623
+
624
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
625
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
626
+
627
+ if query_length == kv_seq_len:
628
+ query_layer = index_first_axis(
629
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
630
+ )
631
+ cu_seqlens_q = cu_seqlens_k
632
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
633
+ indices_q = indices_k
634
+ elif query_length == 1:
635
+ max_seqlen_in_batch_q = 1
636
+ cu_seqlens_q = torch.arange(
637
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
638
+ ) # There is a memcpy here, that is very bad.
639
+ indices_q = cu_seqlens_q[:-1]
640
+ query_layer = query_layer.squeeze(1)
641
+ else:
642
+ # The -q_len: slice assumes left padding.
643
+ attention_mask = attention_mask[:, -query_length:]
644
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
645
+
646
+ return (
647
+ query_layer,
648
+ key_layer,
649
+ value_layer,
650
+ indices_q,
651
+ (cu_seqlens_q, cu_seqlens_k),
652
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
653
+ )
654
+
655
+
656
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Jamba
657
+ class JambaSdpaAttention(JambaAttention):
658
+ """
659
+ Jamba attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
660
+ `JambaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
661
+ SDPA API.
662
+ """
663
+
664
+ # Adapted from JambaAttention.forward
665
+ def forward(
666
+ self,
667
+ hidden_states: torch.Tensor,
668
+ attention_mask: Optional[torch.Tensor] = None,
669
+ position_ids: Optional[torch.LongTensor] = None,
670
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
671
+ output_attentions: bool = False,
672
+ use_cache: bool = False,
673
+ cache_position: Optional[torch.LongTensor] = None,
674
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
675
+ if output_attentions:
676
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
677
+ logger.warning_once(
678
+ "JambaModel is using JambaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
679
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
680
+ )
681
+ return super().forward(
682
+ hidden_states=hidden_states,
683
+ attention_mask=attention_mask,
684
+ position_ids=position_ids,
685
+ past_key_value=past_key_value,
686
+ output_attentions=output_attentions,
687
+ use_cache=use_cache,
688
+ )
689
+
690
+ bsz, q_len, _ = hidden_states.size()
691
+
692
+ query_states = self.q_proj(hidden_states)
693
+ key_states = self.k_proj(hidden_states)
694
+ value_states = self.v_proj(hidden_states)
695
+
696
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
697
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
698
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
699
+
700
+ if past_key_value is not None:
701
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
702
+
703
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
704
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
705
+
706
+ causal_mask = attention_mask
707
+ if attention_mask is not None:
708
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
709
+
710
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
711
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
712
+ if query_states.device.type == "cuda" and attention_mask is not None:
713
+ query_states = query_states.contiguous()
714
+ key_states = key_states.contiguous()
715
+ value_states = value_states.contiguous()
716
+
717
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
718
+ query_states,
719
+ key_states,
720
+ value_states,
721
+ attn_mask=causal_mask,
722
+ dropout_p=self.attention_dropout if self.training else 0.0,
723
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
724
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
725
+ )
726
+
727
+ attn_output = attn_output.transpose(1, 2).contiguous()
728
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
729
+
730
+ attn_output = self.o_proj(attn_output)
731
+
732
+ return attn_output, None, past_key_value
733
+
734
+
735
+ JAMBA_ATTENTION_CLASSES = {
736
+ "eager": JambaAttention,
737
+ "flash_attention_2": JambaFlashAttention2,
738
+ "sdpa": JambaSdpaAttention,
739
+ }
740
+
741
+
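# The mapping above selects the attention backend, e.g. (sketch, assuming `config` is a
# JambaConfig whose `_attn_implementation` has already been resolved):
#
#   attn_cls = JAMBA_ATTENTION_CLASSES[config._attn_implementation]  # "eager" / "flash_attention_2" / "sdpa"
#   self_attn = attn_cls(config, layer_idx=0)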
742
+ # Adapted from transformers.models.mamba.modeling_mamba.MambaMixer
743
+ class JambaMambaMixer(nn.Module):
744
+ """
745
+ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
746
+ A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
747
+ ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
748
+ and is why Mamba is called **selective** state spaces)
749
+ """
750
+
751
+ def __init__(self, config: JambaConfig, layer_idx):
752
+ super().__init__()
753
+ self.config = config
754
+ self.layer_idx = layer_idx
755
+ self.hidden_size = config.hidden_size
756
+ self.ssm_state_size = config.mamba_d_state
757
+ self.conv_kernel_size = config.mamba_d_conv
758
+ self.intermediate_size = config.mamba_expand * config.hidden_size
759
+ self.time_step_rank = config.mamba_dt_rank
760
+ self.use_conv_bias = config.mamba_conv_bias
761
+ self.use_bias = config.mamba_proj_bias
762
+ self.conv1d = nn.Conv1d(
763
+ in_channels=self.intermediate_size,
764
+ out_channels=self.intermediate_size,
765
+ bias=self.use_conv_bias,
766
+ kernel_size=self.conv_kernel_size,
767
+ groups=self.intermediate_size,
768
+ padding=self.conv_kernel_size - 1,
769
+ )
770
+
771
+ self.activation = config.hidden_act
772
+ self.act = ACT2FN[config.hidden_act]
773
+
774
+ self.use_fast_kernels = config.use_mamba_kernels
775
+
776
+ # projection of the input hidden states
777
+ self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
778
+ # selective projection used to make dt, B and C input dependent
779
+ self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
780
+ # time step projection (discretization)
781
+ self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
782
+
783
+ # S4D real initialization. These are not discretized!
784
+ # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
785
+ A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
786
+ A = A.expand(self.intermediate_size, -1).contiguous()
787
+
788
+ self.A_log = nn.Parameter(torch.log(A))
789
+ self.D = nn.Parameter(torch.ones(self.intermediate_size))
790
+ self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
791
+
792
+ self.dt_layernorm = JambaRMSNorm(self.time_step_rank, eps=config.rms_norm_eps)
793
+ self.b_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
794
+ self.c_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
795
+
796
+ if not is_fast_path_available:
797
+ logger.warning_once(
798
+ "The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
799
+ " is None. To install follow https://github.com/state-spaces/mamba/#installation and"
800
+ " https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config"
801
+ )
802
+
803
+ def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: HybridMambaAttentionDynamicCache = None):
804
+ batch_size, seq_len, _ = hidden_states.shape
805
+ use_precomputed_states = (
806
+ cache_params is not None
807
+ and cache_params.has_previous_state
808
+ and seq_len == 1
809
+ and cache_params.conv_states[self.layer_idx].shape[0]
810
+ == cache_params.ssm_states[self.layer_idx].shape[0]
811
+ == batch_size
812
+ )
813
+ # 1. Gated MLP's linear projection
814
+ projected_states = self.in_proj(hidden_states).transpose(1, 2)
815
+
816
+ # We can't use `mamba_inner_fn` even if in training and without cache params because we have the
817
+ # inner layernorms which aren't supported by this fused kernel
818
+ hidden_states, gate = projected_states.chunk(2, dim=1)
819
+
820
+ # 2. Convolution sequence transformation
821
+ conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
822
+ if use_precomputed_states:
823
+ hidden_states = causal_conv1d_update(
824
+ hidden_states.squeeze(-1),
825
+ cache_params.conv_states[self.layer_idx],
826
+ conv_weights,
827
+ self.conv1d.bias,
828
+ self.activation,
829
+ )
830
+ hidden_states = hidden_states.unsqueeze(-1)
831
+ else:
832
+ if cache_params is not None:
833
+ conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
834
+ cache_params.conv_states[self.layer_idx].copy_(conv_states)
835
+ hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
836
+
837
+ # 3. State Space Model sequence transformation
838
+ # 3.a. input varying initialization of time_step, B and C
839
+ ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
840
+ time_step, B, C = torch.split(
841
+ ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
842
+ )
843
+
844
+ time_step = self.dt_layernorm(time_step)
845
+ B = self.b_layernorm(B)
846
+ C = self.c_layernorm(C)
847
+
848
+ # Here we need to apply dt_proj without the bias, as the bias is added in the selective scan kernel.
849
+ # This is a hack to apply dt_proj while still using the forward pass of `torch.nn.Linear`, which is needed
850
+ # in order to make quantization work. Quantization code replaces `torch.nn.Linear` layers with quantized
851
+ # linear layers, and requires to call the forward pass directly.
852
+ # The original code here was: ```discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)```
853
+ time_proj_bias = self.dt_proj.bias
854
+ self.dt_proj.bias = None
855
+ discrete_time_step = self.dt_proj(time_step).transpose(1, 2)
856
+ self.dt_proj.bias = time_proj_bias
857
+
858
+ A = -torch.exp(self.A_log.float())
859
+ # 3.c perform the recurrence y ← SSM(A, B, C)(x)
860
+ time_proj_bias = time_proj_bias.float() if time_proj_bias is not None else None
861
+ if use_precomputed_states:
862
+ scan_outputs = selective_state_update(
863
+ cache_params.ssm_states[self.layer_idx],
864
+ hidden_states[..., 0],
865
+ discrete_time_step[..., 0],
866
+ A,
867
+ B[:, 0],
868
+ C[:, 0],
869
+ self.D,
870
+ gate[..., 0],
871
+ time_proj_bias,
872
+ dt_softplus=True,
873
+ ).unsqueeze(-1)
874
+ else:
875
+ scan_outputs, ssm_state = selective_scan_fn(
876
+ hidden_states,
877
+ discrete_time_step,
878
+ A,
879
+ B.transpose(1, 2),
880
+ C.transpose(1, 2),
881
+ self.D.float(),
882
+ gate,
883
+ time_proj_bias,
884
+ delta_softplus=True,
885
+ return_last_state=True,
886
+ )
887
+ if ssm_state is not None and cache_params is not None:
888
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
889
+
890
+ # 4. Final linear projection
891
+ contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
892
+
893
+ return contextualized_states
894
+
895
+ # fmt: off
896
+ def slow_forward(self, input_states, cache_params: HybridMambaAttentionDynamicCache = None):
897
+ batch_size, seq_len, _ = input_states.shape
898
+ dtype = input_states.dtype
899
+ # 1. Gated MLP's linear projection
900
+ projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len]
901
+ hidden_states, gate = projected_states.chunk(2, dim=1)
902
+
903
+ use_cache = isinstance(cache_params, HybridMambaAttentionDynamicCache)
904
+ # 2. Convolution sequence transformation
905
+ if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size:
906
+ if self.training:
907
+ # In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass
908
+ ssm_state = cache_params.ssm_states[self.layer_idx].clone()
909
+ else:
910
+ ssm_state = cache_params.ssm_states[self.layer_idx]
911
+
912
+ if cache_params.has_previous_state and seq_len == 1 and \
913
+ cache_params.conv_states[self.layer_idx].shape[0] == batch_size:
914
+ conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
915
+ conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
916
+ conv_state[:, :, -1] = hidden_states[:, :, 0]
917
+ cache_params.conv_states[self.layer_idx] = conv_state
918
+ hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
919
+ if self.use_conv_bias:
920
+ hidden_states += self.conv1d.bias
921
+ hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) # [batch, intermediate_size, 1] : decoding
922
+ else:
923
+ conv_state = nn.functional.pad(
924
+ hidden_states,
925
+ (self.conv_kernel_size - hidden_states.shape[-1], 0)
926
+ )
927
+ cache_params.conv_states[self.layer_idx] = conv_state
928
+ hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
929
+ else:
930
+ ssm_state = torch.zeros(
931
+ (batch_size, self.intermediate_size, self.ssm_state_size),
932
+ device=hidden_states.device, dtype=dtype
933
+ )
934
+ hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
935
+
936
+ # 3. State Space Model sequence transformation
937
+ # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
938
+ ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
939
+ time_step, B, C = torch.split(
940
+ ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
941
+ )
942
+
943
+ time_step = self.dt_layernorm(time_step)
944
+ B = self.b_layernorm(B)
945
+ C = self.c_layernorm(C)
946
+
947
+ discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size]
948
+ discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) # [batch, intermediate_size, seq_len]
949
+
950
+ # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
951
+ A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size]
952
+ discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) # [batch, intermediate_size, seq_len, ssm_state_size]
953
+ discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() # [batch, intermediate_size, seq_len, ssm_state_size]
954
+ deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
955
+
956
+ # 3.c perform the recurrence y ← SSM(A, B, C)(x)
957
+ scan_outputs = []
958
+ for i in range(seq_len):
959
+ ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] # [batch, intermediate_size, ssm_state]
960
+ scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) # [batch, intermediate_size, 1]
961
+ scan_outputs.append(scan_output[:, :, 0])
962
+ scan_output = torch.stack(scan_outputs, dim=-1) # [batch, intermediate_size, seq_len]
963
+ scan_output = scan_output + (hidden_states * self.D[None, :, None])
964
+ scan_output = (scan_output * self.act(gate))
965
+
966
+ if use_cache:
967
+ cache_params.ssm_states[self.layer_idx] = ssm_state
968
+
969
+ # 4. Final linear projection
970
+ contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size]
971
+ return contextualized_states
972
+ # fmt: on
973
+
974
+ def forward(self, hidden_states, cache_params: HybridMambaAttentionDynamicCache = None):
975
+ if self.use_fast_kernels:
976
+ if not is_fast_path_available or "cuda" not in self.x_proj.weight.device.type:
977
+ raise ValueError(
978
+ "Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device"
979
+ )
980
+ return self.cuda_kernels_forward(hidden_states, cache_params)
981
+ return self.slow_forward(hidden_states, cache_params)
982
+
983
+
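# Both the fused-kernel and the slow paths of the mixer above implement, per channel,
# the discretized selective state-space recurrence:
#
#   h_t = exp(dt_t * A) * h_{t-1} + (dt_t * B_t) * x_t
#   y_t = C_t . h_t + D * x_t
#
# with dt_t = softplus(dt_proj(time_step_t)), followed by gating with act(gate) and
# the final out_proj.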
984
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Jamba
985
+ class JambaMLP(nn.Module):
986
+ def __init__(self, config):
987
+ super().__init__()
988
+ self.config = config
989
+ self.hidden_size = config.hidden_size
990
+ self.intermediate_size = config.intermediate_size
991
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
992
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
993
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
994
+ self.act_fn = ACT2FN[config.hidden_act]
995
+
996
+ def forward(self, x):
997
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
998
+
999
+
1000
+ # Adapted from transformers.models.mixtral.modeling_mixtral.MixtralSparseMoeBlock with Mistral->Jamba
1001
+ class JambaSparseMoeBlock(nn.Module):
1002
+ """
1003
+ This implementation is
1004
+ strictly equivalent to standard MoE with full capacity (no
1005
+ dropped tokens). It's faster since it formulates MoE operations
1006
+ in terms of block-sparse operations to accommodate imbalanced
1007
+ assignments of tokens to experts, whereas standard MoE either
1008
+ (1) drops tokens at the cost of reduced performance or (2) sets
1009
+ the capacity factor to the number of experts and thus wastes computation
1010
+ and memory on padding.
1011
+ """
1012
+
1013
+ def __init__(self, config: JambaConfig):
1014
+ super().__init__()
1015
+ self.hidden_dim = config.hidden_size
1016
+ self.ffn_dim = config.intermediate_size
1017
+ self.num_experts = config.num_experts
1018
+ self.top_k = config.num_experts_per_tok
1019
+
1020
+ self.router = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
1021
+ self.experts = nn.ModuleList([JambaMLP(config) for _ in range(self.num_experts)])
1022
+
1023
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
1024
+ """ """
1025
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
1026
+
1027
+ hidden_states = hidden_states.view(-1, hidden_dim)
1028
+ # router_logits: (batch * sequence_length, n_experts)
1029
+ router_logits = self.router(hidden_states)
1030
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
1031
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
1032
+ # we cast back to the input dtype
1033
+ routing_weights = routing_weights.to(hidden_states.dtype)
1034
+
1035
+ final_hidden_states = torch.zeros(
1036
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
1037
+ )
1038
+
1039
+ # One hot encode the selected experts to create an expert mask
1040
+ # this will be used to easily index which expert is going to be solicited
1041
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
1042
+
1043
+ # Loop over all available experts in the model and perform the computation on each expert
1044
+ for expert_idx in range(self.num_experts):
1045
+ expert_layer = self.experts[expert_idx]
1046
+ idx, top_x = torch.where(expert_mask[expert_idx])
1047
+
1048
+ if top_x.shape[0] == 0:
1049
+ continue
1050
+
1051
+ # Index the correct hidden states and compute the expert hidden state for
1052
+ # the current expert. We need to make sure to multiply the output hidden
1053
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
1054
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
1055
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
1056
+
1057
+ # However `index_add_` only supports torch tensors for indexing so we'll use
1058
+ # the `top_x` tensor here.
1059
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
1060
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
1061
+ return final_hidden_states, router_logits
1062
+
1063
+
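# Illustrative summary of the routing math above, per token x:
#
#   p    = softmax(router(x))                    # probabilities over num_experts
#   w, e = topk(p, k=num_experts_per_tok)        # top-k weights and expert indices
#   y    = sum_i w_i * expert_{e_i}(x)           # weighted sum of the selected experts' outputs
#
# Note that, unlike Mixtral, the top-k routing weights are not re-normalized to sum to 1.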
1064
+ class JambaAttentionDecoderLayer(nn.Module):
1065
+ def __init__(self, config: JambaConfig, layer_idx: int):
1066
+ super().__init__()
1067
+ num_experts = config.layers_num_experts[layer_idx]
1068
+ self.self_attn = JAMBA_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
1069
+
1070
+ ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP
1071
+ self.feed_forward = ffn_layer_class(config)
1072
+ self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1073
+ self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1074
+
1075
+ def forward(
1076
+ self,
1077
+ hidden_states: torch.Tensor,
1078
+ attention_mask: Optional[torch.Tensor] = None,
1079
+ position_ids: Optional[torch.LongTensor] = None,
1080
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
1081
+ output_attentions: Optional[bool] = False,
1082
+ output_router_logits: Optional[bool] = False,
1083
+ use_cache: Optional[bool] = False,
1084
+ cache_position: Optional[torch.LongTensor] = None,
1085
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1086
+ """
1087
+ Args:
1088
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1089
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
1090
+ `(batch, sequence_length)` where padding elements are indicated by 0.
1091
+ past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
1092
+ output_attentions (`bool`, *optional*):
1093
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1094
+ returned tensors for more detail.
1095
+ output_router_logits (`bool`, *optional*):
1096
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1097
+ should not be returned during inference.
1098
+ use_cache (`bool`, *optional*):
1099
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1100
+ (see `past_key_values`).
1101
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1102
+ Indices depicting the position of the input sequence tokens in the sequence.
1103
+ """
1104
+
1105
+ residual = hidden_states
1106
+
1107
+ hidden_states = self.input_layernorm(hidden_states)
1108
+
1109
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1110
+ hidden_states=hidden_states,
1111
+ attention_mask=attention_mask,
1112
+ position_ids=position_ids,
1113
+ past_key_value=past_key_value,
1114
+ output_attentions=output_attentions,
1115
+ use_cache=use_cache,
1116
+ cache_position=cache_position,
1117
+ )
1118
+
1119
+ # residual connection after attention
1120
+ hidden_states = residual + hidden_states
1121
+
1122
+ # feed-forward (experts/MLP)
1123
+ residual = hidden_states
1124
+ hidden_states = self.pre_ff_layernorm(hidden_states)
1125
+ ff_outputs = self.feed_forward(hidden_states)
1126
+ if isinstance(ff_outputs, tuple):
1127
+ hidden_states, router_logits = ff_outputs
1128
+ else:
1129
+ hidden_states, router_logits = ff_outputs, None
1130
+ hidden_states = residual + hidden_states
1131
+
1132
+ outputs = (hidden_states,)
1133
+
1134
+ if output_attentions:
1135
+ outputs += (self_attn_weights,)
1136
+
1137
+ if use_cache:
1138
+ outputs += (present_key_value,)
1139
+
1140
+ if output_router_logits:
1141
+ outputs += (router_logits,)
1142
+
1143
+ return outputs
1144
+
1145
+
1146
+ class JambaMambaDecoderLayer(nn.Module):
1147
+ def __init__(self, config: JambaConfig, layer_idx: int):
1148
+ super().__init__()
1149
+ num_experts = config.layers_num_experts[layer_idx]
1150
+ self.mamba = JambaMambaMixer(config=config, layer_idx=layer_idx)
1151
+
1152
+ ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP
1153
+ self.feed_forward = ffn_layer_class(config)
1154
+ self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1155
+ self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1156
+
1157
+ def forward(
1158
+ self,
1159
+ hidden_states: torch.Tensor,
1160
+ attention_mask: Optional[torch.Tensor] = None,
1161
+ position_ids: Optional[torch.LongTensor] = None,
1162
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
1163
+ output_attentions: Optional[bool] = False,
1164
+ output_router_logits: Optional[bool] = False,
1165
+ use_cache: Optional[bool] = False,
1166
+ cache_position: Optional[torch.LongTensor] = None,
1167
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1168
+ """
1169
+ Args:
1170
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1171
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
1172
+ `(batch, sequence_length)` where padding elements are indicated by 0.
1173
+ past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
1174
+ output_attentions (`bool`, *optional*):
1175
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1176
+ returned tensors for more detail.
1177
+ output_router_logits (`bool`, *optional*):
1178
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1179
+ should not be returned during inference.
1180
+ use_cache (`bool`, *optional*):
1181
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1182
+ (see `past_key_values`).
1183
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1184
+ Indices depicting the position of the input sequence tokens in the sequence.
1185
+ """
1186
+
1187
+ residual = hidden_states
1188
+
1189
+ hidden_states = self.input_layernorm(hidden_states)
1190
+
1191
+ hidden_states = self.mamba(
1192
+ hidden_states=hidden_states,
1193
+ cache_params=past_key_value,
1194
+ )
1195
+ self_attn_weights = None
1196
+
1197
+ # residual connection after mamba
1198
+ hidden_states = residual + hidden_states
1199
+
1200
+ # feed-forward (experts/MLP)
1201
+ residual = hidden_states
1202
+ hidden_states = self.pre_ff_layernorm(hidden_states)
1203
+ ff_outputs = self.feed_forward(hidden_states)
1204
+ if isinstance(ff_outputs, tuple):
1205
+ hidden_states, router_logits = ff_outputs
1206
+ else:
1207
+ hidden_states, router_logits = ff_outputs, None
1208
+ hidden_states = residual + hidden_states
1209
+
1210
+ outputs = (hidden_states,)
1211
+
1212
+ if output_attentions:
1213
+ outputs += (self_attn_weights,)
1214
+
1215
+ if use_cache:
1216
+ outputs += (past_key_value,)
1217
+
1218
+ if output_router_logits:
1219
+ outputs += (router_logits,)
1220
+
1221
+ return outputs
1222
+
1223
+
1224
+ JAMBA_START_DOCSTRING = r"""
1225
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1226
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
1227
+ etc.)
1228
+
1229
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1230
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1231
+ and behavior.
1232
+
1233
+ Parameters:
1234
+ config ([`JambaConfig`]):
1235
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1236
+ load the weights associated with the model, only the configuration. Check out the
1237
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1238
+ """
1239
+
1240
+
1241
+ @add_start_docstrings(
1242
+ "The bare Jamba Model outputting raw hidden-states without any specific head on top.",
1243
+ JAMBA_START_DOCSTRING,
1244
+ )
1245
+ class JambaPreTrainedModel(PreTrainedModel):
1246
+ config_class = JambaConfig
1247
+ base_model_prefix = "model"
1248
+ supports_gradient_checkpointing = True
1249
+ _no_split_modules = ["JambaAttentionDecoderLayer", "JambaMambaDecoderLayer"]
1250
+ _skip_keys_device_placement = "past_key_values"
1251
+ _supports_flash_attn_2 = True
1252
+ _supports_sdpa = True
1253
+ _supports_cache_class = True
1254
+
1255
+ def _init_weights(self, module):
1256
+ std = self.config.initializer_range
1257
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
1258
+ module.weight.data.normal_(mean=0.0, std=std)
1259
+ if module.bias is not None:
1260
+ module.bias.data.zero_()
1261
+ elif isinstance(module, nn.Embedding):
1262
+ module.weight.data.normal_(mean=0.0, std=std)
1263
+ if module.padding_idx is not None:
1264
+ module.weight.data[module.padding_idx].zero_()
1265
+
1266
+
1267
+ JAMBA_INPUTS_DOCSTRING = r"""
1268
+ Args:
1269
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1270
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1271
+ it.
1272
+
1273
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1274
+ [`PreTrainedTokenizer.__call__`] for details.
1275
+
1276
+ [What are input IDs?](../glossary#input-ids)
1277
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1278
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1279
+
1280
+ - 1 for tokens that are **not masked**,
1281
+ - 0 for tokens that are **masked**.
1282
+
1283
+ [What are attention masks?](../glossary#attention-mask)
1284
+
1285
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1286
+ [`PreTrainedTokenizer.__call__`] for details.
1287
+
1288
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1289
+ `past_key_values`).
1290
+
1291
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1292
+ and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1293
+ information on the default strategy.
1294
+
1295
+ - 1 indicates the head is **not masked**,
1296
+ - 0 indicates the head is **masked**.
1297
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1298
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1299
+ config.n_positions - 1]`.
1300
+
1301
+ [What are position IDs?](../glossary#position-ids)
1302
+ past_key_values (`HybridMambaAttentionDynamicCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1303
+ A HybridMambaAttentionDynamicCache object containing pre-computed hidden-states (keys and values in the
1304
+ self-attention blocks and convolution and ssm states in the mamba blocks) that can be used (see
1305
+ `past_key_values` input) to speed up sequential decoding.
1306
+ Key and value cache tensors have shape `(batch_size, num_heads, seq_len, head_dim)`.
1307
+ Convolution and ssm states tensors have shape `(batch_size, d_inner, d_conv)` and
1308
+ `(batch_size, d_inner, d_state)` respectively.
1309
+ See the `HybridMambaAttentionDynamicCache` class for more details.
1310
+
1311
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
1312
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1313
+ `input_ids` of shape `(batch_size, sequence_length)`.
1314
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1315
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1316
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1317
+ model's internal embedding lookup matrix.
1318
+ use_cache (`bool`, *optional*):
1319
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1320
+ `past_key_values`).
1321
+ output_attentions (`bool`, *optional*):
1322
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1323
+ tensors for more detail.
1324
+ output_hidden_states (`bool`, *optional*):
1325
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1326
+ more detail.
1327
+ output_router_logits (`bool`, *optional*):
1328
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1329
+ should not be returned during inference.
1330
+ return_dict (`bool`, *optional*):
1331
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1332
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1333
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
1334
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1335
+ the complete sequence length.
1336
+ """
1337
+
1338
+ ALL_DECODER_LAYER_TYPES = {"attention": JambaAttentionDecoderLayer, "mamba": JambaMambaDecoderLayer}
1339
+
1340
+
1341
+ @add_start_docstrings(
1342
+ "The bare Jamba Model outputting raw hidden-states without any specific head on top.",
1343
+ JAMBA_START_DOCSTRING,
1344
+ )
1345
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->JAMBA, Mistral->Jamba
1346
+ class JambaModel(JambaPreTrainedModel):
1347
+ """
1348
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`JambaDecoderLayer`]
1349
+
1350
+ Args:
1351
+ config: JambaConfig
1352
+ """
1353
+
1354
+ def __init__(self, config: JambaConfig):
1355
+ super().__init__(config)
1356
+ self.padding_idx = config.pad_token_id
1357
+ self.vocab_size = config.vocab_size
1358
+
1359
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1360
+ decoder_layers = []
1361
+ for i in range(config.num_hidden_layers):
1362
+ layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]]
1363
+ decoder_layers.append(layer_class(config, layer_idx=i))
1364
+ self.layers = nn.ModuleList(decoder_layers)
1365
+
1366
+ self._attn_implementation = config._attn_implementation
1367
+ self.final_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1368
+
1369
+ self.gradient_checkpointing = False
1370
+ # Initialize weights and apply final processing
1371
+ self.post_init()
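The loop in `__init__` above dispatches each layer index to either an attention or a Mamba decoder layer via `config.layers_block_type`. A minimal sketch of the same dispatch pattern, using a made-up block-type list and stand-in classes (the real list and classes come from `JambaConfig` and the decoder layers defined earlier):

```python
# Hypothetical block-type list; the real one is read from JambaConfig.layers_block_type.
layers_block_type = ["mamba", "mamba", "attention", "mamba"]

class FakeAttention:  # stand-in for JambaAttentionDecoderLayer
    def __init__(self, layer_idx): self.layer_idx = layer_idx

class FakeMamba:      # stand-in for JambaMambaDecoderLayer
    def __init__(self, layer_idx): self.layer_idx = layer_idx

layer_types = {"attention": FakeAttention, "mamba": FakeMamba}
layers = [layer_types[t](layer_idx=i) for i, t in enumerate(layers_block_type)]
print([type(l).__name__ for l in layers])  # ['FakeMamba', 'FakeMamba', 'FakeAttention', 'FakeMamba']
```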
1372
+
1373
+ def get_input_embeddings(self):
1374
+ return self.embed_tokens
1375
+
1376
+ def set_input_embeddings(self, value):
1377
+ self.embed_tokens = value
1378
+
1379
+ @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
1380
+ def forward(
1381
+ self,
1382
+ input_ids: torch.LongTensor = None,
1383
+ attention_mask: Optional[torch.Tensor] = None,
1384
+ position_ids: Optional[torch.LongTensor] = None,
1385
+ past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
1386
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1387
+ use_cache: Optional[bool] = None,
1388
+ output_attentions: Optional[bool] = None,
1389
+ output_hidden_states: Optional[bool] = None,
1390
+ output_router_logits: Optional[bool] = None,
1391
+ return_dict: Optional[bool] = None,
1392
+ cache_position: Optional[torch.LongTensor] = None,
1393
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
1394
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1395
+ output_router_logits = (
1396
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1397
+ )
1398
+ output_hidden_states = (
1399
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1400
+ )
1401
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1402
+
1403
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1404
+
1405
+ if (input_ids is None) ^ (inputs_embeds is not None):
1406
+ raise ValueError(
1407
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
1408
+ )
1409
+
1410
+ if self.gradient_checkpointing and self.training and use_cache:
1411
+ logger.warning_once(
1412
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1413
+ )
1414
+ use_cache = False
1415
+
1416
+ if inputs_embeds is None:
1417
+ inputs_embeds = self.embed_tokens(input_ids)
1418
+ hidden_states = inputs_embeds
1419
+
1420
+ if use_cache and past_key_values is None:
1421
+ logger.warning_once(
1422
+ "Jamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was "
1423
+ "provided, so no cache will be returned."
1424
+ )
1425
+
1426
+ if cache_position is None:
1427
+ cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
1428
+ if position_ids is None:
1429
+ position_ids = cache_position.unsqueeze(0)
1430
+
1431
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
1432
+
1433
+ all_hidden_states = () if output_hidden_states else None
1434
+ all_self_attns = () if output_attentions else None
1435
+ all_router_logits = () if output_router_logits else None
1436
+
1437
+ for decoder_layer in self.layers:
1438
+ if output_hidden_states:
1439
+ all_hidden_states += (hidden_states,)
1440
+
1441
+ if self.gradient_checkpointing and self.training:
1442
+ layer_outputs = self._gradient_checkpointing_func(
1443
+ decoder_layer.__call__,
1444
+ hidden_states,
1445
+ causal_mask,
1446
+ position_ids,
1447
+ past_key_values,
1448
+ output_attentions,
1449
+ output_router_logits,
1450
+ use_cache,
1451
+ cache_position,
1452
+ )
1453
+ else:
1454
+ layer_outputs = decoder_layer(
1455
+ hidden_states,
1456
+ attention_mask=causal_mask,
1457
+ position_ids=position_ids,
1458
+ past_key_value=past_key_values,
1459
+ output_attentions=output_attentions,
1460
+ output_router_logits=output_router_logits,
1461
+ use_cache=use_cache,
1462
+ cache_position=cache_position,
1463
+ )
1464
+
1465
+ hidden_states = layer_outputs[0]
1466
+
1467
+ if output_attentions:
1468
+ if layer_outputs[1] is not None:
1469
+ # append attention weights only for attention layers. Mamba layers return `None` as the attention weights
1470
+ all_self_attns += (layer_outputs[1],)
1471
+
1472
+ if output_router_logits:
1473
+ if layer_outputs[-1] is not None:
1474
+ # append router logits only for expert layers. Regular MLP layers return `None` as the router logits
1475
+ all_router_logits += (layer_outputs[-1],)
1476
+
1477
+ hidden_states = self.final_layernorm(hidden_states)
1478
+
1479
+ # add hidden states from the last decoder layer
1480
+ if output_hidden_states:
1481
+ all_hidden_states += (hidden_states,)
1482
+
1483
+ if past_key_values and not past_key_values.has_previous_state:
1484
+ past_key_values.has_previous_state = True
1485
+
1486
+ next_cache = None if not use_cache else past_key_values
1487
+
1488
+ if not return_dict:
1489
+ return tuple(
1490
+ v
1491
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1492
+ if v is not None
1493
+ )
1494
+ return MoeModelOutputWithPast(
1495
+ last_hidden_state=hidden_states,
1496
+ past_key_values=next_cache,
1497
+ hidden_states=all_hidden_states,
1498
+ attentions=all_self_attns,
1499
+ router_logits=all_router_logits,
1500
+ )
1501
+
1502
+ def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
1503
+ if self.config._attn_implementation == "flash_attention_2":
1504
+ if attention_mask is not None and 0.0 in attention_mask:
1505
+ return attention_mask
1506
+ return None
1507
+
1508
+ dtype, device = input_tensor.dtype, input_tensor.device
1509
+ min_dtype = torch.finfo(dtype).min
1510
+ sequence_length = input_tensor.shape[1]
1511
+ target_length = cache_position[-1] + 1
1512
+
1513
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1514
+ if sequence_length != 1:
1515
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1516
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1517
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1518
+ if attention_mask is not None:
1519
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1520
+ if attention_mask.dim() == 2:
1521
+ mask_length = attention_mask.shape[-1]
1522
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1523
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1524
+
1525
+ if (
1526
+ self.config._attn_implementation == "sdpa"
1527
+ and attention_mask is not None
1528
+ and attention_mask.device.type == "cuda"
1529
+ ):
1530
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1531
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1532
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1533
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1534
+
1535
+ return causal_mask
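A toy reproduction of the core masking steps above, outside the model (the sequence length and dtype are arbitrary choices for illustration):

```python
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length = 4
cache_position = torch.arange(sequence_length)
target_length = int(cache_position[-1]) + 1

# Fill with the most negative value, keep only the strictly-upper-triangular part,
# then zero out positions at or before each cache position.
mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
mask = torch.triu(mask, diagonal=1)
mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
print(mask)  # 0 on and below the diagonal, a large negative value above it
```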
1536
+
1537
+
1538
+ # Adapted from transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM with MIXTRAL->JAMBA, Mixtral->Jamba
1539
+ class JambaForCausalLM(JambaPreTrainedModel):
1540
+ _tied_weights_keys = ["lm_head.weight"]
1541
+
1542
+ def __init__(self, config: JambaConfig):
1543
+ super().__init__(config)
1544
+ self.model = JambaModel(config)
1545
+ self.vocab_size = config.vocab_size
1546
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1547
+ self.router_aux_loss_coef = config.router_aux_loss_coef
1548
+ self.num_experts = config.num_experts
1549
+ self.num_experts_per_tok = config.num_experts_per_tok
1550
+ # Initialize weights and apply final processing
1551
+ self.post_init()
1552
+
1553
+ def get_input_embeddings(self):
1554
+ return self.model.embed_tokens
1555
+
1556
+ def set_input_embeddings(self, value):
1557
+ self.model.embed_tokens = value
1558
+
1559
+ def get_output_embeddings(self):
1560
+ return self.lm_head
1561
+
1562
+ def set_output_embeddings(self, new_embeddings):
1563
+ self.lm_head = new_embeddings
1564
+
1565
+ def set_decoder(self, decoder):
1566
+ self.model = decoder
1567
+
1568
+ def get_decoder(self):
1569
+ return self.model
1570
+
1571
+ @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
1572
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1573
+ # Ignore copy
1574
+ def forward(
1575
+ self,
1576
+ input_ids: torch.LongTensor = None,
1577
+ attention_mask: Optional[torch.Tensor] = None,
1578
+ position_ids: Optional[torch.LongTensor] = None,
1579
+ past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
1580
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1581
+ labels: Optional[torch.LongTensor] = None,
1582
+ use_cache: Optional[bool] = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ output_router_logits: Optional[bool] = None,
1586
+ return_dict: Optional[bool] = None,
1587
+ cache_position: Optional[torch.LongTensor] = None,
1588
+ num_logits_to_keep: Optional[Union[int, None]] = None,
1589
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1590
+ r"""
1591
+ Args:
1592
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1593
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1594
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1595
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1596
+
1597
+ num_logits_to_keep (`int` or `None`, *optional*):
1598
+ Calculate logits for the last `num_logits_to_keep` tokens. If `None`, calculate logits for all
1599
+ `input_ids`. Only the last token's logits are needed for generation, and calculating them only for that token
1600
+ can save memory, which becomes significant for long sequences.
1601
+
1602
+ Returns:
1603
+
1604
+ Example:
1605
+
1606
+ ```python
1607
+ >>> from transformers import AutoTokenizer, JambaForCausalLM
1608
+
1609
+ >>> model = JambaForCausalLM.from_pretrained("ai21labs/Jamba-v0.1")
1610
+ >>> tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1")
1611
+
1612
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1613
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1614
+
1615
+ >>> # Generate
1616
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1617
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1618
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1619
+ ```"""
1620
+
1621
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1622
+ output_router_logits = (
1623
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1624
+ )
1625
+
1626
+ output_hidden_states = (
1627
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1628
+ )
1629
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1630
+
1631
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1632
+ outputs = self.model(
1633
+ input_ids=input_ids,
1634
+ attention_mask=attention_mask,
1635
+ position_ids=position_ids,
1636
+ past_key_values=past_key_values,
1637
+ inputs_embeds=inputs_embeds,
1638
+ use_cache=use_cache,
1639
+ output_attentions=output_attentions,
1640
+ output_hidden_states=output_hidden_states,
1641
+ output_router_logits=output_router_logits,
1642
+ cache_position=cache_position,
1643
+ return_dict=return_dict,
1644
+ )
1645
+
1646
+ hidden_states = outputs[0]
1647
+ if num_logits_to_keep is None:
1648
+ logits = self.lm_head(hidden_states)
1649
+ else:
1650
+ logits = self.lm_head(hidden_states[..., -num_logits_to_keep:, :])
1651
+ logits = logits.float()
1652
+
1653
+ loss = None
1654
+ if labels is not None:
1655
+ # Shift so that tokens < n predict n
1656
+ shift_logits = logits[..., :-1, :].contiguous()
1657
+ shift_labels = labels[..., 1:].contiguous()
1658
+ # Flatten the tokens
1659
+ loss_fct = CrossEntropyLoss()
1660
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1661
+ shift_labels = shift_labels.view(-1)
1662
+ # Enable model parallelism
1663
+ shift_labels = shift_labels.to(shift_logits.device)
1664
+ loss = loss_fct(shift_logits, shift_labels)
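The shift above aligns each position's logits with the *next* token before computing the loss; a standalone sketch with made-up shapes:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
logits = torch.randn(1, 5, vocab_size)          # (batch, seq, vocab)
labels = torch.randint(0, vocab_size, (1, 5))   # the input tokens double as targets

shift_logits = logits[..., :-1, :].contiguous() # positions 0..3 predict ...
shift_labels = labels[..., 1:].contiguous()     # ... tokens 1..4
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)
```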
1665
+
1666
+ aux_loss = None
1667
+ if output_router_logits:
1668
+ aux_loss = load_balancing_loss_func(
1669
+ outputs.router_logits if return_dict else outputs[-1],
1670
+ self.num_experts,
1671
+ self.num_experts_per_tok,
1672
+ attention_mask,
1673
+ )
1674
+ if labels is not None:
1675
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
1676
+
1677
+ if not return_dict:
1678
+ output = (logits,) + outputs[1:]
1679
+ if output_router_logits:
1680
+ output = (aux_loss,) + output
1681
+ return (loss,) + output if loss is not None else output
1682
+
1683
+ return MoeCausalLMOutputWithPast(
1684
+ loss=loss,
1685
+ aux_loss=aux_loss,
1686
+ logits=logits,
1687
+ past_key_values=outputs.past_key_values,
1688
+ hidden_states=outputs.hidden_states,
1689
+ attentions=outputs.attentions,
1690
+ router_logits=outputs.router_logits,
1691
+ )
1692
+
1693
+ def prepare_inputs_for_generation(
1694
+ self,
1695
+ input_ids,
1696
+ past_key_values=None,
1697
+ attention_mask=None,
1698
+ inputs_embeds=None,
1699
+ output_router_logits=False,
1700
+ cache_position=None,
1701
+ **kwargs,
1702
+ ):
1703
+ empty_past_kv = past_key_values is None
1704
+
1705
+ # Omit tokens covered by past_key_values
1706
+ if not empty_past_kv:
1707
+ past_length = cache_position[0] if cache_position is not None else attention_mask.shape[1]
1708
+ max_cache_length = self.config.sliding_window
1709
+ # Keep only the unprocessed tokens:
1710
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1711
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1712
+ # input)
1713
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1714
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1715
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1716
+ # input_ids based on the past_length.
1717
+ elif past_length < input_ids.shape[1]:
1718
+ input_ids = input_ids[:, past_length:]
1719
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1720
+
1721
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1722
+ if (
1723
+ max_cache_length is not None
1724
+ and attention_mask is not None
1725
+ and past_length + input_ids.shape[1] > max_cache_length
1726
+ ):
1727
+ attention_mask = attention_mask[:, -max_cache_length:]
1728
+ else:
1729
+ past_key_values = HybridMambaAttentionDynamicCache(
1730
+ self.config, input_ids.shape[0], self.dtype, device=self.device
1731
+ )
1732
+
1733
+ position_ids = kwargs.get("position_ids", None)
1734
+ if attention_mask is not None and position_ids is None:
1735
+ # create position_ids on the fly for batch generation
1736
+ position_ids = attention_mask.long().cumsum(-1) - 1
1737
+ position_ids.masked_fill_(attention_mask == 0, 1)
1738
+ if not empty_past_kv:
1739
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1740
+
1741
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1742
+ if inputs_embeds is not None and empty_past_kv:
1743
+ model_inputs = {"inputs_embeds": inputs_embeds}
1744
+ else:
1745
+ model_inputs = {"input_ids": input_ids}
1746
+
1747
+ model_inputs.update(
1748
+ {
1749
+ "position_ids": position_ids,
1750
+ "past_key_values": past_key_values,
1751
+ "use_cache": kwargs.get("use_cache"),
1752
+ "attention_mask": attention_mask,
1753
+ "output_router_logits": output_router_logits,
1754
+ "num_logits_to_keep": self.config.num_logits_to_keep,
1755
+ "cache_position": cache_position,
1756
+ }
1757
+ )
1758
+ return model_inputs
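The on-the-fly `position_ids` construction above (a cumulative sum over the attention mask, with padding positions filled with 1) can be seen in isolation on a small made-up batch:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],   # left-padded row
                               [1, 1, 1, 1, 1]])  # full row
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```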
1759
+
1760
+
1761
+ @add_start_docstrings(
1762
+ """
1763
+ The Jamba Model with a sequence classification head on top (linear layer).
1764
+
1765
+ [`JambaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1766
+ (e.g. GPT-2) do.
1767
+
1768
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1769
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1770
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1771
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1772
+ each row of the batch).
1773
+ """,
1774
+ JAMBA_START_DOCSTRING,
1775
+ )
1776
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralForSequenceClassification with Mixtral->Jamba, MIXTRAL->JAMBA
1777
+ class JambaForSequenceClassification(JambaPreTrainedModel):
1778
+ def __init__(self, config):
1779
+ super().__init__(config)
1780
+ self.num_labels = config.num_labels
1781
+ self.model = JambaModel(config)
1782
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1783
+
1784
+ # Initialize weights and apply final processing
1785
+ self.post_init()
1786
+
1787
+ def get_input_embeddings(self):
1788
+ return self.model.embed_tokens
1789
+
1790
+ def set_input_embeddings(self, value):
1791
+ self.model.embed_tokens = value
1792
+
1793
+ @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
1794
+ def forward(
1795
+ self,
1796
+ input_ids: torch.LongTensor = None,
1797
+ attention_mask: Optional[torch.Tensor] = None,
1798
+ position_ids: Optional[torch.LongTensor] = None,
1799
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1800
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1801
+ labels: Optional[torch.LongTensor] = None,
1802
+ use_cache: Optional[bool] = None,
1803
+ output_attentions: Optional[bool] = None,
1804
+ output_hidden_states: Optional[bool] = None,
1805
+ return_dict: Optional[bool] = None,
1806
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1807
+ r"""
1808
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1809
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1810
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1811
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1812
+ """
1813
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1814
+
1815
+ transformer_outputs = self.model(
1816
+ input_ids,
1817
+ attention_mask=attention_mask,
1818
+ position_ids=position_ids,
1819
+ past_key_values=past_key_values,
1820
+ inputs_embeds=inputs_embeds,
1821
+ use_cache=use_cache,
1822
+ output_attentions=output_attentions,
1823
+ output_hidden_states=output_hidden_states,
1824
+ return_dict=return_dict,
1825
+ )
1826
+ hidden_states = transformer_outputs[0]
1827
+ logits = self.score(hidden_states)
1828
+
1829
+ if input_ids is not None:
1830
+ batch_size = input_ids.shape[0]
1831
+ else:
1832
+ batch_size = inputs_embeds.shape[0]
1833
+
1834
+ if self.config.pad_token_id is None and batch_size != 1:
1835
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1836
+ if self.config.pad_token_id is None:
1837
+ sequence_lengths = -1
1838
+ else:
1839
+ if input_ids is not None:
1840
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1841
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1842
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1843
+ sequence_lengths = sequence_lengths.to(logits.device)
1844
+ else:
1845
+ sequence_lengths = -1
1846
+
1847
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
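The pooling above selects the logits of the last non-padding token in each row in an ONNX-friendly way; a standalone illustration with a hypothetical `pad_token_id` of 0:

```python
import torch

pad_token_id = 0  # hypothetical value for illustration
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [3, 4, 8, 9, 2]])
# index of the first pad token minus one; the modulo handles rows with no padding
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4]) -> index of the last real token in each row
```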
1848
+
1849
+ loss = None
1850
+ if labels is not None:
1851
+ labels = labels.to(logits.device)
1852
+ if self.config.problem_type is None:
1853
+ if self.num_labels == 1:
1854
+ self.config.problem_type = "regression"
1855
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1856
+ self.config.problem_type = "single_label_classification"
1857
+ else:
1858
+ self.config.problem_type = "multi_label_classification"
1859
+
1860
+ if self.config.problem_type == "regression":
1861
+ loss_fct = MSELoss()
1862
+ if self.num_labels == 1:
1863
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1864
+ else:
1865
+ loss = loss_fct(pooled_logits, labels)
1866
+ elif self.config.problem_type == "single_label_classification":
1867
+ loss_fct = CrossEntropyLoss()
1868
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1869
+ elif self.config.problem_type == "multi_label_classification":
1870
+ loss_fct = BCEWithLogitsLoss()
1871
+ loss = loss_fct(pooled_logits, labels)
1872
+ if not return_dict:
1873
+ output = (pooled_logits,) + transformer_outputs[1:]
1874
+ return ((loss,) + output) if loss is not None else output
1875
+
1876
+ return SequenceClassifierOutputWithPast(
1877
+ loss=loss,
1878
+ logits=pooled_logits,
1879
+ past_key_values=transformer_outputs.past_key_values,
1880
+ hidden_states=transformer_outputs.hidden_states,
1881
+ attentions=transformer_outputs.attentions,
1882
+ )
venv/lib/python3.10/site-packages/transformers/models/maskformer/__init__.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
21
+ "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
31
+ _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
32
+
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_maskformer"] = [
41
+ "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
42
+ "MaskFormerForInstanceSegmentation",
43
+ "MaskFormerModel",
44
+ "MaskFormerPreTrainedModel",
45
+ ]
46
+ _import_structure["modeling_maskformer_swin"] = [
47
+ "MaskFormerSwinBackbone",
48
+ "MaskFormerSwinModel",
49
+ "MaskFormerSwinPreTrainedModel",
50
+ ]
51
+
52
+ if TYPE_CHECKING:
53
+ from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
54
+ from .configuration_maskformer_swin import MaskFormerSwinConfig
55
+
56
+ try:
57
+ if not is_vision_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .feature_extraction_maskformer import MaskFormerFeatureExtractor
63
+ from .image_processing_maskformer import MaskFormerImageProcessor
64
+ try:
65
+ if not is_torch_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_maskformer import (
71
+ MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
72
+ MaskFormerForInstanceSegmentation,
73
+ MaskFormerModel,
74
+ MaskFormerPreTrainedModel,
75
+ )
76
+ from .modeling_maskformer_swin import (
77
+ MaskFormerSwinBackbone,
78
+ MaskFormerSwinModel,
79
+ MaskFormerSwinPreTrainedModel,
80
+ )
81
+
82
+
83
+ else:
84
+ import sys
85
+
86
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
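The `_LazyModule` indirection above defers the torch/vision imports until an attribute is first accessed. A minimal sketch of the same lazy-import pattern (a simplified stand-in, not the transformers implementation):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes from submodules on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule_name: [exported_names]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)
```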
venv/lib/python3.10/site-packages/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,730 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import sys
16
+ from argparse import ArgumentParser
17
+ from dataclasses import dataclass
18
+ from pathlib import Path
19
+ from pprint import pformat
20
+ from typing import Any, Dict, Iterator, List, Set, Tuple
21
+
22
+ import requests
23
+ import torch
24
+ import torchvision.transforms as T
25
+ from detectron2.checkpoint import DetectionCheckpointer
26
+ from detectron2.config import get_cfg
27
+ from detectron2.data import MetadataCatalog
28
+ from detectron2.projects.deeplab import add_deeplab_config
29
+ from PIL import Image
30
+ from torch import Tensor, nn
31
+
32
+ from transformers.models.maskformer.feature_extraction_maskformer import MaskFormerImageProcessor
33
+ from transformers.models.maskformer.modeling_maskformer import (
34
+ MaskFormerConfig,
35
+ MaskFormerForInstanceSegmentation,
36
+ MaskFormerForInstanceSegmentationOutput,
37
+ MaskFormerModel,
38
+ MaskFormerModelOutput,
39
+ )
40
+ from transformers.utils import logging
41
+
42
+
43
+ StateDict = Dict[str, Tensor]
44
+
45
+ logging.set_verbosity_info()
46
+ logger = logging.get_logger()
47
+
48
+ torch.manual_seed(0)
49
+
50
+
51
+ class TrackedStateDict:
52
+ def __init__(self, to_track: Dict):
53
+ """This class "tracks" a python dictionary by keeping track of which item is accessed.
54
+
55
+ Args:
56
+ to_track (Dict): The dictionary we wish to track
57
+ """
58
+ self.to_track = to_track
59
+ self._seen: Set[str] = set()
60
+
61
+ def __getitem__(self, key: str) -> Any:
62
+ return self.to_track[key]
63
+
64
+ def __setitem__(self, key: str, item: Any):
65
+ self._seen.add(key)
66
+ self.to_track[key] = item
67
+
68
+ def diff(self) -> List[str]:
69
+ """This method returns a set difference between the keys in the tracked state dict and the one we have access so far.
70
+ This is an effective way to check whether we have updated all the keys.
71
+
72
+ Returns:
73
+ List[str]: List of keys not yet updated
74
+ """
75
+ return set(self.to_track.keys()) - self._seen
76
+
77
+ def copy(self) -> Dict:
78
+ # proxy the call to the internal dictionary
79
+ return self.to_track.copy()
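A quick usage sketch of `TrackedStateDict`: keys written through `__setitem__` are marked as seen, and `diff()` reports which keys were never touched (set ordering may vary):

```python
tracked = TrackedStateDict({"a": 1, "b": 2, "c": 3})
tracked["a"] = 10          # marks "a" as seen
print(tracked.diff())      # {'b', 'c'} -- keys that were never written
```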
80
+
81
+
82
+ # We will verify our results on an image of cute cats
83
+ def prepare_img():
84
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
85
+ img_data = requests.get(url, stream=True).raw
86
+ im = Image.open(img_data)
87
+ return im
88
+
89
+
90
+ @dataclass
91
+ class Args:
92
+ """Fake command line arguments needed by maskformer/detectron implementation"""
93
+
94
+ config_file: str
95
+
96
+
97
+ def setup_cfg(args: Args):
98
+ # load config from file and command-line arguments
99
+ cfg = get_cfg()
100
+ add_deeplab_config(cfg)
101
+ add_mask_former_config(cfg)
102
+ cfg.merge_from_file(args.config_file)
103
+ cfg.freeze()
104
+ return cfg
105
+
106
+
107
+ class OriginalMaskFormerConfigToOursConverter:
108
+ def __call__(self, original_config: object) -> MaskFormerConfig:
109
+ model = original_config.MODEL
110
+ mask_former = model.MASK_FORMER
111
+ swin = model.SWIN
112
+
113
+ dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
114
+ id2label = dict(enumerate(dataset_catalog.stuff_classes))
115
+ label2id = {label: idx for idx, label in id2label.items()}
116
+
117
+ config: MaskFormerConfig = MaskFormerConfig(
118
+ fpn_feature_size=model.SEM_SEG_HEAD.CONVS_DIM,
119
+ mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM,
120
+ num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
121
+ no_object_weight=mask_former.NO_OBJECT_WEIGHT,
122
+ num_queries=mask_former.NUM_OBJECT_QUERIES,
123
+ backbone_config={
124
+ "pretrain_img_size": swin.PRETRAIN_IMG_SIZE,
125
+ "image_size": swin.PRETRAIN_IMG_SIZE,
126
+ "in_channels": 3,
127
+ "patch_size": swin.PATCH_SIZE,
128
+ "embed_dim": swin.EMBED_DIM,
129
+ "depths": swin.DEPTHS,
130
+ "num_heads": swin.NUM_HEADS,
131
+ "window_size": swin.WINDOW_SIZE,
132
+ "drop_path_rate": swin.DROP_PATH_RATE,
133
+ "model_type": "swin",
134
+ },
135
+ dice_weight=mask_former.DICE_WEIGHT,
136
+ ce_weight=1.0,
137
+ mask_weight=mask_former.MASK_WEIGHT,
138
+ decoder_config={
139
+ "model_type": "detr",
140
+ "max_position_embeddings": 1024,
141
+ "encoder_layers": 6,
142
+ "encoder_ffn_dim": 2048,
143
+ "encoder_attention_heads": 8,
144
+ "decoder_layers": mask_former.DEC_LAYERS,
145
+ "decoder_ffn_dim": mask_former.DIM_FEEDFORWARD,
146
+ "decoder_attention_heads": mask_former.NHEADS,
147
+ "encoder_layerdrop": 0.0,
148
+ "decoder_layerdrop": 0.0,
149
+ "d_model": mask_former.HIDDEN_DIM,
150
+ "dropout": mask_former.DROPOUT,
151
+ "attention_dropout": 0.0,
152
+ "activation_dropout": 0.0,
153
+ "init_std": 0.02,
154
+ "init_xavier_std": 1.0,
155
+ "scale_embedding": False,
156
+ "auxiliary_loss": False,
157
+ "dilation": False,
158
+ # default pretrained config values
159
+ },
160
+ id2label=id2label,
161
+ label2id=label2id,
162
+ )
163
+
164
+ return config
165
+
166
+
167
+ class OriginalMaskFormerConfigToImageProcessorConverter:
168
+ def __call__(self, original_config: object) -> MaskFormerImageProcessor:
169
+ model = original_config.MODEL
170
+ model_input = original_config.INPUT
171
+ dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
172
+
173
+ return MaskFormerImageProcessor(
174
+ image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
175
+ image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
176
+ size=model_input.MIN_SIZE_TEST,
177
+ max_size=model_input.MAX_SIZE_TEST,
178
+ num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
179
+ ignore_index=dataset_catalog.ignore_label,
180
+ size_divisibility=32, # 32 is required by swin
181
+ )
182
+
183
+
184
+ class OriginalMaskFormerCheckpointToOursConverter:
185
+ def __init__(self, original_model: nn.Module, config: MaskFormerConfig):
186
+ self.original_model = original_model
187
+ self.config = config
188
+
189
+ def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
190
+ for src_key, dst_key in renamed_keys:
191
+ dst_state_dict[dst_key] = src_state_dict.pop(src_key)
192
+
193
+ def replace_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: MaskFormerConfig):
194
+ dst_prefix: str = "pixel_level_module.encoder"
195
+ src_prefix: str = "backbone"
196
+
197
+ renamed_keys = [
198
+ (
199
+ f"{src_prefix}.patch_embed.proj.weight",
200
+ f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight",
201
+ ),
202
+ (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"),
203
+ (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"),
204
+ (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"),
205
+ ]
206
+ num_layers = len(config.backbone_config.depths)
207
+ for layer_idx in range(num_layers):
208
+ for block_idx in range(config.backbone_config.depths[layer_idx]):
209
+ renamed_keys.extend(
210
+ [ # src, dst
211
+ (
212
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
213
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
214
+ ),
215
+ (
216
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
217
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
218
+ ),
219
+ (
220
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
221
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
222
+ ),
223
+ ]
224
+ )
225
+ # now we need to handle the attentions
226
+ # read in weights + bias of input projection layer of cross-attention
227
+
228
+ src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
229
+ src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
230
+
231
+ size = src_att_weight.shape[0]
232
+ offset = size // 3
233
+ dst_state_dict[
234
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
235
+ ] = src_att_weight[:offset, :]
236
+ dst_state_dict[
237
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
238
+ ] = src_att_bias[:offset]
239
+
240
+ dst_state_dict[
241
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
242
+ ] = src_att_weight[offset : offset * 2, :]
243
+ dst_state_dict[
244
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
245
+ ] = src_att_bias[offset : offset * 2]
246
+
247
+ dst_state_dict[
248
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
249
+ ] = src_att_weight[-offset:, :]
250
+ dst_state_dict[
251
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
252
+ ] = src_att_bias[-offset:]
253
+
254
+ # let's pop them
255
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
256
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
257
+ # proj
258
+ renamed_keys.extend(
259
+ [
260
+ (
261
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
262
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
263
+ ),
264
+ (
265
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
266
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
267
+ ),
268
+ ]
269
+ )
270
+
271
+ # second norm
272
+ renamed_keys.extend(
273
+ [
274
+ (
275
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
276
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
277
+ ),
278
+ (
279
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
280
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
281
+ ),
282
+ ]
283
+ )
284
+
285
+ # mlp
286
+ renamed_keys.extend(
287
+ [
288
+ (
289
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
290
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
291
+ ),
292
+ (
293
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
294
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
295
+ ),
296
+ (
297
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
298
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
299
+ ),
300
+ (
301
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
302
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
303
+ ),
304
+ ]
305
+ )
306
+
307
+ renamed_keys.extend(
308
+ [
309
+ (
310
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
311
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
312
+ )
313
+ ]
314
+ )
315
+
316
+ if layer_idx < num_layers - 1:
317
+ # patch merging
318
+ renamed_keys.extend(
319
+ [
320
+ (
321
+ f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
322
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight",
323
+ ),
324
+ (
325
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
326
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight",
327
+ ),
328
+ (
329
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
330
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias",
331
+ ),
332
+ ]
333
+ )
334
+
335
+ # hidden states norms
336
+ renamed_keys.extend(
337
+ [
338
+ (
339
+ f"{src_prefix}.norm{layer_idx}.weight",
340
+ f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight",
341
+ ),
342
+ (
343
+ f"{src_prefix}.norm{layer_idx}.bias",
344
+ f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias",
345
+ ),
346
+ ]
347
+ )
348
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
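The slicing inside `replace_backbone` above splits each fused Swin `qkv` projection into separate query/key/value tensors; the same idea on a tiny made-up weight:

```python
import torch

hidden = 4
qkv_weight = torch.randn(3 * hidden, hidden)   # fused projection, rows stacked as [q | k | v]
offset = qkv_weight.shape[0] // 3
q_w = qkv_weight[:offset, :]
k_w = qkv_weight[offset:offset * 2, :]
v_w = qkv_weight[-offset:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)
```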
349
+
350
+ def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
351
+ dst_prefix: str = "pixel_level_module.decoder"
352
+ src_prefix: str = "sem_seg_head.pixel_decoder"
353
+
354
+ self.replace_backbone(dst_state_dict, src_state_dict, self.config)
355
+
356
+ def rename_keys_for_conv(detectron_conv: str, mine_conv: str):
357
+ return [
358
+ (f"{detectron_conv}.weight", f"{mine_conv}.0.weight"),
359
+ # 2 because they have an activation in the middle -> rename it
360
+ (f"{detectron_conv}.norm.weight", f"{mine_conv}.1.weight"),
361
+ (f"{detectron_conv}.norm.bias", f"{mine_conv}.1.bias"),
362
+ ]
363
+
364
+ renamed_keys = [
365
+ (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
366
+ (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
367
+ # the layers in the original one are in reverse order, stem is the last one!
368
+ ]
369
+
370
+ renamed_keys.extend(rename_keys_for_conv(f"{src_prefix}.layer_4", f"{dst_prefix}.fpn.stem"))
371
+
372
+ # add all the fpn layers (here we need some config parameters to know the size in advance)
373
+ for src_i, dst_i in zip(range(3, 0, -1), range(0, 3)):
374
+ renamed_keys.extend(
375
+ rename_keys_for_conv(f"{src_prefix}.adapter_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.proj")
376
+ )
377
+ renamed_keys.extend(
378
+ rename_keys_for_conv(f"{src_prefix}.layer_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.block")
379
+ )
380
+
381
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
382
+
383
+ def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
384
+ dst_prefix: str = "transformer_module.decoder"
385
+ src_prefix: str = "sem_seg_head.predictor.transformer.decoder"
386
+ # not sure why we are not popping directly here!
387
+ # here we list all keys to be renamed (original name on the left, our name on the right)
388
+ rename_keys = []
389
+ for i in range(self.config.decoder_config.decoder_layers):
390
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
391
+ rename_keys.append(
392
+ (
393
+ f"{src_prefix}.layers.{i}.self_attn.out_proj.weight",
394
+ f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight",
395
+ )
396
+ )
397
+ rename_keys.append(
398
+ (
399
+ f"{src_prefix}.layers.{i}.self_attn.out_proj.bias",
400
+ f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias",
401
+ )
402
+ )
403
+ rename_keys.append(
404
+ (
405
+ f"{src_prefix}.layers.{i}.multihead_attn.out_proj.weight",
406
+ f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.weight",
407
+ )
408
+ )
409
+ rename_keys.append(
410
+ (
411
+ f"{src_prefix}.layers.{i}.multihead_attn.out_proj.bias",
412
+ f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.bias",
413
+ )
414
+ )
415
+ rename_keys.append((f"{src_prefix}.layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight"))
416
+ rename_keys.append((f"{src_prefix}.layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias"))
417
+ rename_keys.append((f"{src_prefix}.layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight"))
418
+ rename_keys.append((f"{src_prefix}.layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias"))
419
+ rename_keys.append(
420
+ (f"{src_prefix}.layers.{i}.norm1.weight", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight")
421
+ )
422
+ rename_keys.append(
423
+ (f"{src_prefix}.layers.{i}.norm1.bias", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias")
424
+ )
425
+ rename_keys.append(
426
+ (f"{src_prefix}.layers.{i}.norm2.weight", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.weight")
427
+ )
428
+ rename_keys.append(
429
+ (f"{src_prefix}.layers.{i}.norm2.bias", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.bias")
430
+ )
431
+ rename_keys.append(
432
+ (f"{src_prefix}.layers.{i}.norm3.weight", f"{dst_prefix}.layers.{i}.final_layer_norm.weight")
433
+ )
434
+ rename_keys.append(
435
+ (f"{src_prefix}.layers.{i}.norm3.bias", f"{dst_prefix}.layers.{i}.final_layer_norm.bias")
436
+ )
437
+
438
+ return rename_keys
439
+
440
+ def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
441
+ dst_prefix: str = "transformer_module.decoder"
442
+ src_prefix: str = "sem_seg_head.predictor.transformer.decoder"
443
+ for i in range(self.config.decoder_config.decoder_layers):
444
+ # read in weights + bias of input projection layer of self-attention
445
+ in_proj_weight = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_weight")
446
+ in_proj_bias = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_bias")
447
+ # next, add query, keys and values (in that order) to the state dict
448
+ dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
449
+ dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
450
+ dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
451
+ dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
452
+ dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
453
+ dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
454
+ # read in weights + bias of input projection layer of cross-attention
455
+ in_proj_weight_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_weight")
456
+ in_proj_bias_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_bias")
457
+ # next, add query, keys and values (in that order) of cross-attention to the state dict
458
+ dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
459
+ dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
460
+ dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[
461
+ 256:512, :
462
+ ]
463
+ dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
464
+ dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
465
+ dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
466
+
467
+ def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
468
+ dst_prefix: str = "transformer_module.decoder"
469
+ src_prefix: str = "sem_seg_head.predictor.transformer.decoder"
470
+ renamed_keys = self.rename_keys_in_detr_decoder(dst_state_dict, src_state_dict)
471
+ # add more
472
+ renamed_keys.extend(
473
+ [
474
+ (f"{src_prefix}.norm.weight", f"{dst_prefix}.layernorm.weight"),
475
+ (f"{src_prefix}.norm.bias", f"{dst_prefix}.layernorm.bias"),
476
+ ]
477
+ )
478
+
479
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
480
+
481
+ self.replace_q_k_v_in_detr_decoder(dst_state_dict, src_state_dict)
482
+
483
+ def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
484
+ dst_prefix: str = "transformer_module"
485
+ src_prefix: str = "sem_seg_head.predictor"
486
+
487
+ self.replace_detr_decoder(dst_state_dict, src_state_dict)
488
+
489
+ renamed_keys = [
490
+ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
491
+ (f"{src_prefix}.input_proj.weight", f"{dst_prefix}.input_projection.weight"),
492
+ (f"{src_prefix}.input_proj.bias", f"{dst_prefix}.input_projection.bias"),
493
+ ]
494
+
495
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
496
+
497
+ def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
498
+ # NOTE in our case we don't have a prefix, thus we removed the "." from the keys later on!
499
+ dst_prefix: str = ""
500
+ src_prefix: str = "sem_seg_head.predictor"
501
+
502
+ renamed_keys = [
503
+ (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"),
504
+ (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"),
505
+ ]
506
+
507
+ mlp_len = 3
508
+ for i in range(mlp_len):
509
+ renamed_keys.extend(
510
+ [
511
+ (f"{src_prefix}.mask_embed.layers.{i}.weight", f"{dst_prefix}mask_embedder.{i}.0.weight"),
512
+ (f"{src_prefix}.mask_embed.layers.{i}.bias", f"{dst_prefix}mask_embedder.{i}.0.bias"),
513
+ ]
514
+ )
515
+ logger.info(f"Replacing keys {pformat(renamed_keys)}")
516
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
517
+
518
+ def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel:
519
+ dst_state_dict = TrackedStateDict(mask_former.state_dict())
520
+ src_state_dict = self.original_model.state_dict()
521
+
522
+ self.replace_pixel_module(dst_state_dict, src_state_dict)
523
+ self.replace_transformer_module(dst_state_dict, src_state_dict)
524
+
525
+ logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}")
526
+ logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}")
527
+ logger.info("🙌 Done")
528
+
529
+ mask_former.load_state_dict(dst_state_dict)
530
+
531
+ return mask_former
532
+
533
+ def convert_instance_segmentation(
534
+ self, mask_former: MaskFormerForInstanceSegmentation
535
+ ) -> MaskFormerForInstanceSegmentation:
536
+ dst_state_dict = TrackedStateDict(mask_former.state_dict())
537
+ src_state_dict = self.original_model.state_dict()
538
+
539
+ self.replace_instance_segmentation_module(dst_state_dict, src_state_dict)
540
+
541
+ mask_former.load_state_dict(dst_state_dict)
542
+
543
+ return mask_former
544
+
545
+ @staticmethod
546
+ def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]:
547
+ checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl")
548
+
549
+ for checkpoint in checkpoints:
550
+ logger.info(f"💪 Converting {checkpoint.stem}")
551
+ # find associated config file
552
+ config: Path = config_dir / checkpoint.parents[0].stem / "swin" / f"{checkpoint.stem}.yaml"
553
+
554
+ yield config, checkpoint
555
+
556
+
557
+ def test(original_model, our_model: MaskFormerForInstanceSegmentation, image_processor: MaskFormerImageProcessor):
558
+ with torch.no_grad():
559
+ original_model = original_model.eval()
560
+ our_model = our_model.eval()
561
+
562
+ im = prepare_img()
563
+
564
+ tr = T.Compose(
565
+ [
566
+ T.Resize((384, 384)),
567
+ T.ToTensor(),
568
+ T.Normalize(
569
+ mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0,
570
+ std=torch.tensor([58.395, 57.120, 57.375]) / 255.0,
571
+ ),
572
+ ],
573
+ )
574
+
575
+ x = tr(im).unsqueeze(0)
576
+
577
+ original_model_backbone_features = original_model.backbone(x.clone())
578
+
579
+ our_model_output: MaskFormerModelOutput = our_model.model(x.clone(), output_hidden_states=True)
580
+
581
+ for original_model_feature, our_model_feature in zip(
582
+ original_model_backbone_features.values(), our_model_output.encoder_hidden_states
583
+ ):
584
+ assert torch.allclose(
585
+ original_model_feature, our_model_feature, atol=1e-3
586
+ ), "The backbone features are not the same."
587
+
588
+ original_model_pixel_out = original_model.sem_seg_head.pixel_decoder.forward_features(
589
+ original_model_backbone_features
590
+ )
591
+
592
+ assert torch.allclose(
593
+ original_model_pixel_out[0], our_model_output.pixel_decoder_last_hidden_state, atol=1e-4
594
+ ), "The pixel decoder feature are not the same"
595
+
596
+ # let's test the full model
597
+ original_model_out = original_model([{"image": x.squeeze(0)}])
598
+
599
+ original_segmentation = original_model_out[0]["sem_seg"]
600
+
601
+ our_model_out: MaskFormerForInstanceSegmentationOutput = our_model(x)
602
+
603
+ our_segmentation = image_processor.post_process_segmentation(our_model_out, target_size=(384, 384))
604
+
605
+ assert torch.allclose(
606
+ original_segmentation, our_segmentation, atol=1e-3
607
+ ), "The segmentation image is not the same."
608
+
609
+ logger.info("✅ Test passed!")
610
+
611
+
612
+ def get_name(checkpoint_file: Path):
613
+ model_name_raw: str = checkpoint_file.stem
614
+ # model_name_raw is something like maskformer_panoptic_swin_base_IN21k_384_bs64_554k
615
+ parent_name: str = checkpoint_file.parents[0].stem
616
+ backbone = "swin"
617
+ dataset = ""
618
+ if "coco" in parent_name:
619
+ dataset = "coco"
620
+ elif "ade" in parent_name:
621
+ dataset = "ade"
622
+ else:
623
+ raise ValueError(f"{parent_name} must be wrong since we didn't find 'coco' or 'ade' in it ")
624
+
625
+ backbone_types = ["tiny", "small", "base", "large"]
626
+
627
+ backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0]
628
+
629
+ model_name = f"maskformer-{backbone}-{backbone_type}-{dataset}"
630
+
631
+ return model_name
632
+
633
+
634
+ if __name__ == "__main__":
635
+ parser = ArgumentParser(
636
+ description="Command line to convert the original maskformers (with swin backbone) to our implementations."
637
+ )
638
+
639
+ parser.add_argument(
640
+ "--checkpoints_dir",
641
+ type=Path,
642
+ help=(
643
+ "A directory containing the model's checkpoints. The directory has to have the following structure:"
644
+ " <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pkl"
645
+ ),
646
+ )
647
+ parser.add_argument(
648
+ "--configs_dir",
649
+ type=Path,
650
+ help=(
651
+ "A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
652
+ " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml"
653
+ ),
654
+ )
655
+ parser.add_argument(
656
+ "--pytorch_dump_folder_path",
657
+ required=True,
658
+ type=Path,
659
+ help="Path to the folder to output PyTorch models.",
660
+ )
661
+ parser.add_argument(
662
+ "--maskformer_dir",
663
+ required=True,
664
+ type=Path,
665
+ help=(
666
+ "A path to MaskFormer's original implementation directory. You can download from here:"
667
+ " https://github.com/facebookresearch/MaskFormer"
668
+ ),
669
+ )
670
+
671
+ args = parser.parse_args()
672
+
673
+ checkpoints_dir: Path = args.checkpoints_dir
674
+ config_dir: Path = args.configs_dir
675
+ save_directory: Path = args.pytorch_dump_folder_path
676
+ maskformer_dir: Path = args.maskformer_dir
677
+ # append the path to the parents to maskformer dir
678
+ sys.path.append(str(maskformer_dir.parent))
679
+ # and import what's needed
680
+ from MaskFormer.mask_former import add_mask_former_config
681
+ from MaskFormer.mask_former.mask_former_model import MaskFormer as OriginalMaskFormer
682
+
683
+ if not save_directory.exists():
684
+ save_directory.mkdir(parents=True)
685
+
686
+ for config_file, checkpoint_file in OriginalMaskFormerCheckpointToOursConverter.using_dirs(
687
+ checkpoints_dir, config_dir
688
+ ):
689
+ image_processor = OriginalMaskFormerConfigToImageProcessorConverter()(setup_cfg(Args(config_file=config_file)))
690
+
691
+ original_config = setup_cfg(Args(config_file=config_file))
692
+ mask_former_kwargs = OriginalMaskFormer.from_config(original_config)
693
+
694
+ original_model = OriginalMaskFormer(**mask_former_kwargs).eval()
695
+
696
+ DetectionCheckpointer(original_model).load(str(checkpoint_file))
697
+
698
+ config: MaskFormerConfig = OriginalMaskFormerConfigToOursConverter()(original_config)
699
+
700
+ mask_former = MaskFormerModel(config=config).eval()
701
+
702
+ converter = OriginalMaskFormerCheckpointToOursConverter(original_model, config)
703
+
704
+ maskformer = converter.convert(mask_former)
705
+
706
+ mask_former_for_instance_segmentation = MaskFormerForInstanceSegmentation(config=config).eval()
707
+
708
+ mask_former_for_instance_segmentation.model = mask_former
709
+ mask_former_for_instance_segmentation = converter.convert_instance_segmentation(
710
+ mask_former_for_instance_segmentation
711
+ )
712
+
713
+ test(original_model, mask_former_for_instance_segmentation, image_processor)
714
+
715
+ model_name = get_name(checkpoint_file)
716
+ logger.info(f"🪄 Saving {model_name}")
717
+
718
+ image_processor.save_pretrained(save_directory / model_name)
719
+ mask_former_for_instance_segmentation.save_pretrained(save_directory / model_name)
720
+
721
+ image_processor.push_to_hub(
722
+ repo_path_or_name=save_directory / model_name,
723
+ commit_message="Add model",
724
+ use_temp_dir=True,
725
+ )
726
+ mask_former_for_instance_segmentation.push_to_hub(
727
+ repo_path_or_name=save_directory / model_name,
728
+ commit_message="Add model",
729
+ use_temp_dir=True,
730
+ )
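
As a quick illustration of the naming logic above, here is a hedged sketch; the checkpoint path is hypothetical and only mirrors the `<DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pkl` layout expected by `--checkpoints_dir`:

from pathlib import Path

# Hypothetical checkpoint laid out as <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pkl
checkpoint_file = Path("checkpoints/coco/maskformer_panoptic_swin_base_IN21k_384_bs64_554k.pkl")
# get_name() reads the dataset from the parent folder and the backbone size from the file stem
print(get_name(checkpoint_file))  # maskformer-swin-base-coco
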
venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer_swin.py ADDED
@@ -0,0 +1,912 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """MaskFormer Swin Transformer. The Swin Transformer is re-implemented here because MaskFormer uses the hidden
17
+ states before downsampling, which differs from the default Swin Transformer."""
18
+
19
+ import collections.abc
20
+ import math
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple
23
+
24
+ import torch
25
+ from torch import Tensor, nn
26
+
27
+ from ...activations import ACT2FN
28
+ from ...file_utils import ModelOutput
29
+ from ...modeling_outputs import BackboneOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
32
+ from ...utils.backbone_utils import BackboneMixin
33
+ from .configuration_maskformer_swin import MaskFormerSwinConfig
34
+
35
+
36
+ @dataclass
37
+ class MaskFormerSwinModelOutputWithPooling(ModelOutput):
38
+ """
39
+ Class for MaskFormerSwinModel's outputs that also contains the spatial dimensions of the hidden states.
40
+
41
+ Args:
42
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
43
+ Sequence of hidden-states at the output of the last layer of the model.
44
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
45
+ Last layer hidden-state after a mean pooling operation.
46
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
47
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
48
+ shape `(batch_size, sequence_length, hidden_size)`.
49
+
50
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
51
+ hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
52
+ A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
53
+ `batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the
54
+ `forward` method.
55
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
56
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
57
+ sequence_length)`.
58
+
59
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
60
+ heads.
61
+ """
62
+
63
+ last_hidden_state: torch.FloatTensor = None
64
+ pooler_output: torch.FloatTensor = None
65
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
66
+ hidden_states_spatial_dimensions: Tuple[Tuple[int, int]] = None
67
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
68
+
69
+
70
+ @dataclass
71
+ class MaskFormerSwinBaseModelOutput(ModelOutput):
72
+ """
73
+ Class for SwinEncoder's outputs.
74
+
75
+ Args:
76
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
77
+ Sequence of hidden-states at the output of the last layer of the model.
78
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
79
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
80
+ shape `(batch_size, sequence_length, hidden_size)`.
81
+
82
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
83
+ hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
84
+ A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
85
+ `batch, channels, height, width`. Due to padding, their spatial size cannot inferred before the `forward`
86
+ method.
87
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
88
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
89
+ sequence_length)`.
90
+
91
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
92
+ heads.
93
+ """
94
+
95
+ last_hidden_state: torch.FloatTensor = None
96
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
97
+ hidden_states_spatial_dimensions: Tuple[Tuple[int, int]] = None
98
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
99
+
100
+
101
+ # Copied from transformers.models.swin.modeling_swin.window_partition
102
+ def window_partition(input_feature, window_size):
103
+ """
104
+ Partitions the given input into windows.
105
+ """
106
+ batch_size, height, width, num_channels = input_feature.shape
107
+ input_feature = input_feature.view(
108
+ batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
109
+ )
110
+ windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
111
+ return windows
112
+
113
+
114
+ # Copied from transformers.models.swin.modeling_swin.window_reverse
115
+ def window_reverse(windows, window_size, height, width):
116
+ """
117
+ Merges windows to produce higher resolution features.
118
+ """
119
+ num_channels = windows.shape[-1]
120
+ windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
121
+ windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
122
+ return windows
123
+
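# A quick, hedged shape check for the two helpers above (illustrative only; the values are assumptions, not part of this module):
#
#     feature = torch.rand(1, 14, 14, 96)                      # (batch, height, width, channels)
#     windows = window_partition(feature, window_size=7)       # -> (4, 7, 7, 96): four non-overlapping 7x7 windows
#     restored = window_reverse(windows, 7, height=14, width=14)
#     assert torch.equal(feature, restored)                    # the round trip is lossless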
124
+
125
+ # Copied from transformers.models.swin.modeling_swin.drop_path
126
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
127
+ """
128
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
129
+
130
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
131
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
132
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
133
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
134
+ argument.
135
+ """
136
+ if drop_prob == 0.0 or not training:
137
+ return input
138
+ keep_prob = 1 - drop_prob
139
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
140
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
141
+ random_tensor.floor_() # binarize
142
+ output = input.div(keep_prob) * random_tensor
143
+ return output
144
+
145
+
146
+ class MaskFormerSwinEmbeddings(nn.Module):
147
+ """
148
+ Construct the patch and position embeddings.
149
+ """
150
+
151
+ def __init__(self, config):
152
+ super().__init__()
153
+
154
+ self.patch_embeddings = MaskFormerSwinPatchEmbeddings(config)
155
+ num_patches = self.patch_embeddings.num_patches
156
+ self.patch_grid = self.patch_embeddings.grid_size
157
+
158
+ if config.use_absolute_embeddings:
159
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
160
+ else:
161
+ self.position_embeddings = None
162
+
163
+ self.norm = nn.LayerNorm(config.embed_dim)
164
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
165
+
166
+ def forward(self, pixel_values):
167
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
168
+ embeddings = self.norm(embeddings)
169
+
170
+ if self.position_embeddings is not None:
171
+ embeddings = embeddings + self.position_embeddings
172
+
173
+ embeddings = self.dropout(embeddings)
174
+
175
+ return embeddings, output_dimensions
176
+
177
+
178
+ # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings
179
+ class MaskFormerSwinPatchEmbeddings(nn.Module):
180
+ """
181
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
182
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
183
+ Transformer.
184
+ """
185
+
186
+ def __init__(self, config):
187
+ super().__init__()
188
+ image_size, patch_size = config.image_size, config.patch_size
189
+ num_channels, hidden_size = config.num_channels, config.embed_dim
190
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
191
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
192
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
193
+ self.image_size = image_size
194
+ self.patch_size = patch_size
195
+ self.num_channels = num_channels
196
+ self.num_patches = num_patches
197
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
198
+
199
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
200
+
201
+ def maybe_pad(self, pixel_values, height, width):
202
+ if width % self.patch_size[1] != 0:
203
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
204
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
205
+ if height % self.patch_size[0] != 0:
206
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
207
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
208
+ return pixel_values
209
+
210
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
211
+ _, num_channels, height, width = pixel_values.shape
212
+ if num_channels != self.num_channels:
213
+ raise ValueError(
214
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
215
+ )
216
+ # pad the input to be divisible by self.patch_size, if needed
217
+ pixel_values = self.maybe_pad(pixel_values, height, width)
218
+ embeddings = self.projection(pixel_values)
219
+ _, _, height, width = embeddings.shape
220
+ output_dimensions = (height, width)
221
+ embeddings = embeddings.flatten(2).transpose(1, 2)
222
+
223
+ return embeddings, output_dimensions
224
+
225
+
226
+ # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging
227
+ class MaskFormerSwinPatchMerging(nn.Module):
228
+ """
229
+ Patch Merging Layer.
230
+
231
+ Args:
232
+ input_resolution (`Tuple[int]`):
233
+ Resolution of input feature.
234
+ dim (`int`):
235
+ Number of input channels.
236
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
237
+ Normalization layer class.
238
+ """
239
+
240
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
241
+ super().__init__()
242
+ self.input_resolution = input_resolution
243
+ self.dim = dim
244
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
245
+ self.norm = norm_layer(4 * dim)
246
+
247
+ def maybe_pad(self, input_feature, height, width):
248
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
249
+ if should_pad:
250
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
251
+ input_feature = nn.functional.pad(input_feature, pad_values)
252
+
253
+ return input_feature
254
+
255
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
256
+ height, width = input_dimensions
257
+ # `dim` is height * width
258
+ batch_size, dim, num_channels = input_feature.shape
259
+
260
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
261
+ # pad input so that its height and width are divisible by 2, if needed
262
+ input_feature = self.maybe_pad(input_feature, height, width)
263
+ # [batch_size, height/2, width/2, num_channels]
264
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
265
+ # [batch_size, height/2, width/2, num_channels]
266
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
267
+ # [batch_size, height/2, width/2, num_channels]
268
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
269
+ # [batch_size, height/2, width/2, num_channels]
270
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
271
+ # batch_size height/2 width/2 4*num_channels
272
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
273
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
274
+
275
+ input_feature = self.norm(input_feature)
276
+ input_feature = self.reduction(input_feature)
277
+
278
+ return input_feature
279
+
280
+
281
+ # Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->MaskFormerSwin
282
+ class MaskFormerSwinDropPath(nn.Module):
283
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
284
+
285
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
286
+ super().__init__()
287
+ self.drop_prob = drop_prob
288
+
289
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
290
+ return drop_path(hidden_states, self.drop_prob, self.training)
291
+
292
+ def extra_repr(self) -> str:
293
+ return "p={}".format(self.drop_prob)
294
+
295
+
296
+ # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->MaskFormerSwin
297
+ class MaskFormerSwinSelfAttention(nn.Module):
298
+ def __init__(self, config, dim, num_heads, window_size):
299
+ super().__init__()
300
+ if dim % num_heads != 0:
301
+ raise ValueError(
302
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
303
+ )
304
+
305
+ self.num_attention_heads = num_heads
306
+ self.attention_head_size = int(dim / num_heads)
307
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
308
+ self.window_size = (
309
+ window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
310
+ )
311
+
312
+ self.relative_position_bias_table = nn.Parameter(
313
+ torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
314
+ )
315
+
316
+ # get pair-wise relative position index for each token inside the window
317
+ coords_h = torch.arange(self.window_size[0])
318
+ coords_w = torch.arange(self.window_size[1])
319
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
320
+ coords_flatten = torch.flatten(coords, 1)
321
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
322
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
323
+ relative_coords[:, :, 0] += self.window_size[0] - 1
324
+ relative_coords[:, :, 1] += self.window_size[1] - 1
325
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
326
+ relative_position_index = relative_coords.sum(-1)
327
+ self.register_buffer("relative_position_index", relative_position_index)
328
+
329
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
330
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
331
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
332
+
333
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
334
+
335
+ def transpose_for_scores(self, x):
336
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
337
+ x = x.view(new_x_shape)
338
+ return x.permute(0, 2, 1, 3)
339
+
340
+ def forward(
341
+ self,
342
+ hidden_states: torch.Tensor,
343
+ attention_mask: Optional[torch.FloatTensor] = None,
344
+ head_mask: Optional[torch.FloatTensor] = None,
345
+ output_attentions: Optional[bool] = False,
346
+ ) -> Tuple[torch.Tensor]:
347
+ batch_size, dim, num_channels = hidden_states.shape
348
+ mixed_query_layer = self.query(hidden_states)
349
+
350
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
351
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
352
+ query_layer = self.transpose_for_scores(mixed_query_layer)
353
+
354
+ # Take the dot product between "query" and "key" to get the raw attention scores.
355
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
356
+
357
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
358
+
359
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
360
+ relative_position_bias = relative_position_bias.view(
361
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
362
+ )
363
+
364
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
365
+ attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
366
+
367
+ if attention_mask is not None:
368
+ # Apply the attention mask (precomputed for all layers in MaskFormerSwinModel forward() function)
369
+ mask_shape = attention_mask.shape[0]
370
+ attention_scores = attention_scores.view(
371
+ batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
372
+ )
373
+ attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
374
+ attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
375
+
376
+ # Normalize the attention scores to probabilities.
377
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
378
+
379
+ # This is actually dropping out entire tokens to attend to, which might
380
+ # seem a bit unusual, but is taken from the original Transformer paper.
381
+ attention_probs = self.dropout(attention_probs)
382
+
383
+ # Mask heads if we want to
384
+ if head_mask is not None:
385
+ attention_probs = attention_probs * head_mask
386
+
387
+ context_layer = torch.matmul(attention_probs, value_layer)
388
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
389
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
390
+ context_layer = context_layer.view(new_context_layer_shape)
391
+
392
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
393
+
394
+ return outputs
395
+
396
+
397
+ # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->MaskFormerSwin
398
+ class MaskFormerSwinSelfOutput(nn.Module):
399
+ def __init__(self, config, dim):
400
+ super().__init__()
401
+ self.dense = nn.Linear(dim, dim)
402
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
403
+
404
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
405
+ hidden_states = self.dense(hidden_states)
406
+ hidden_states = self.dropout(hidden_states)
407
+
408
+ return hidden_states
409
+
410
+
411
+ # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->MaskFormerSwin
412
+ class MaskFormerSwinAttention(nn.Module):
413
+ def __init__(self, config, dim, num_heads, window_size):
414
+ super().__init__()
415
+ self.self = MaskFormerSwinSelfAttention(config, dim, num_heads, window_size)
416
+ self.output = MaskFormerSwinSelfOutput(config, dim)
417
+ self.pruned_heads = set()
418
+
419
+ def prune_heads(self, heads):
420
+ if len(heads) == 0:
421
+ return
422
+ heads, index = find_pruneable_heads_and_indices(
423
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
424
+ )
425
+
426
+ # Prune linear layers
427
+ self.self.query = prune_linear_layer(self.self.query, index)
428
+ self.self.key = prune_linear_layer(self.self.key, index)
429
+ self.self.value = prune_linear_layer(self.self.value, index)
430
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
431
+
432
+ # Update hyper params and store pruned heads
433
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
434
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
435
+ self.pruned_heads = self.pruned_heads.union(heads)
436
+
437
+ def forward(
438
+ self,
439
+ hidden_states: torch.Tensor,
440
+ attention_mask: Optional[torch.FloatTensor] = None,
441
+ head_mask: Optional[torch.FloatTensor] = None,
442
+ output_attentions: Optional[bool] = False,
443
+ ) -> Tuple[torch.Tensor]:
444
+ self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
445
+ attention_output = self.output(self_outputs[0], hidden_states)
446
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
447
+ return outputs
448
+
449
+
450
+ # Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->MaskFormerSwin
451
+ class MaskFormerSwinIntermediate(nn.Module):
452
+ def __init__(self, config, dim):
453
+ super().__init__()
454
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
455
+ if isinstance(config.hidden_act, str):
456
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
457
+ else:
458
+ self.intermediate_act_fn = config.hidden_act
459
+
460
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
461
+ hidden_states = self.dense(hidden_states)
462
+ hidden_states = self.intermediate_act_fn(hidden_states)
463
+ return hidden_states
464
+
465
+
466
+ # Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->MaskFormerSwin
467
+ class MaskFormerSwinOutput(nn.Module):
468
+ def __init__(self, config, dim):
469
+ super().__init__()
470
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
471
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
472
+
473
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
474
+ hidden_states = self.dense(hidden_states)
475
+ hidden_states = self.dropout(hidden_states)
476
+ return hidden_states
477
+
478
+
479
+ class MaskFormerSwinLayer(nn.Module):
480
+ def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
481
+ super().__init__()
482
+ self.shift_size = shift_size
483
+ self.window_size = config.window_size
484
+ self.input_resolution = input_resolution
485
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
486
+ self.attention = MaskFormerSwinAttention(config, dim, num_heads, self.window_size)
487
+ self.drop_path = (
488
+ MaskFormerSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
489
+ )
490
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
491
+ self.intermediate = MaskFormerSwinIntermediate(config, dim)
492
+ self.output = MaskFormerSwinOutput(config, dim)
493
+
494
+ def get_attn_mask(self, input_resolution):
495
+ if self.shift_size > 0:
496
+ # calculate attention mask for SW-MSA
497
+ height, width = input_resolution
498
+ img_mask = torch.zeros((1, height, width, 1))
499
+ height_slices = (
500
+ slice(0, -self.window_size),
501
+ slice(-self.window_size, -self.shift_size),
502
+ slice(-self.shift_size, None),
503
+ )
504
+ width_slices = (
505
+ slice(0, -self.window_size),
506
+ slice(-self.window_size, -self.shift_size),
507
+ slice(-self.shift_size, None),
508
+ )
509
+ count = 0
510
+ for height_slice in height_slices:
511
+ for width_slice in width_slices:
512
+ img_mask[:, height_slice, width_slice, :] = count
513
+ count += 1
514
+
515
+ mask_windows = window_partition(img_mask, self.window_size)
516
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
517
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
518
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
519
+ else:
520
+ attn_mask = None
521
+ return attn_mask
522
+
523
+ def maybe_pad(self, hidden_states, height, width):
524
+ pad_left = pad_top = 0
525
+ pad_right = (self.window_size - width % self.window_size) % self.window_size
526
+ pad_bottom = (self.window_size - height % self.window_size) % self.window_size
527
+ pad_values = (0, 0, pad_left, pad_right, pad_top, pad_bottom)
528
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
529
+ return hidden_states, pad_values
530
+
531
+ def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
532
+ height, width = input_dimensions
533
+ batch_size, dim, channels = hidden_states.size()
534
+ shortcut = hidden_states
535
+
536
+ hidden_states = self.layernorm_before(hidden_states)
537
+ hidden_states = hidden_states.view(batch_size, height, width, channels)
538
+ # pad hidden_states to multiples of window size
539
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
540
+
541
+ _, height_pad, width_pad, _ = hidden_states.shape
542
+ # cyclic shift
543
+ if self.shift_size > 0:
544
+ shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
545
+ else:
546
+ shifted_hidden_states = hidden_states
547
+
548
+ # partition windows
549
+ hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
550
+ hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
551
+ attn_mask = self.get_attn_mask((height_pad, width_pad))
552
+ if attn_mask is not None:
553
+ attn_mask = attn_mask.to(hidden_states_windows.device)
554
+
555
+ self_attention_outputs = self.attention(
556
+ hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
557
+ )
558
+
559
+ attention_output = self_attention_outputs[0]
560
+
561
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
562
+
563
+ attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
564
+ shifted_windows = window_reverse(
565
+ attention_windows, self.window_size, height_pad, width_pad
566
+ ) # B height' width' C
567
+
568
+ # reverse cyclic shift
569
+ if self.shift_size > 0:
570
+ attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
571
+ else:
572
+ attention_windows = shifted_windows
573
+
574
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
575
+ if was_padded:
576
+ attention_windows = attention_windows[:, :height, :width, :].contiguous()
577
+
578
+ attention_windows = attention_windows.view(batch_size, height * width, channels)
579
+
580
+ hidden_states = shortcut + self.drop_path(attention_windows)
581
+
582
+ layer_output = self.layernorm_after(hidden_states)
583
+ layer_output = self.intermediate(layer_output)
584
+ layer_output = hidden_states + self.output(layer_output)
585
+
586
+ outputs = (layer_output,) + outputs
587
+
588
+ return outputs
589
+
590
+
591
+ class MaskFormerSwinStage(nn.Module):
592
+ # Copied from transformers.models.swin.modeling_swin.SwinStage.__init__ with Swin->MaskFormerSwin
593
+ def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
594
+ super().__init__()
595
+ self.config = config
596
+ self.dim = dim
597
+ self.blocks = nn.ModuleList(
598
+ [
599
+ MaskFormerSwinLayer(
600
+ config=config,
601
+ dim=dim,
602
+ input_resolution=input_resolution,
603
+ num_heads=num_heads,
604
+ shift_size=0 if (i % 2 == 0) else config.window_size // 2,
605
+ )
606
+ for i in range(depth)
607
+ ]
608
+ )
609
+
610
+ # patch merging layer
611
+ if downsample is not None:
612
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
613
+ else:
614
+ self.downsample = None
615
+
616
+ self.pointing = False
617
+
618
+ def forward(
619
+ self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False
620
+ ):
621
+ all_hidden_states = () if output_hidden_states else None
622
+
623
+ height, width = input_dimensions
624
+ for i, block_module in enumerate(self.blocks):
625
+ if output_hidden_states:
626
+ all_hidden_states = all_hidden_states + (hidden_states,)
627
+
628
+ layer_head_mask = head_mask[i] if head_mask is not None else None
629
+
630
+ block_hidden_states = block_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
631
+
632
+ hidden_states = block_hidden_states[0]
633
+
634
+ if output_hidden_states:
635
+ all_hidden_states += (hidden_states,)
636
+
637
+ if self.downsample is not None:
638
+ height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
639
+ output_dimensions = (height, width, height_downsampled, width_downsampled)
640
+ hidden_states = self.downsample(hidden_states, input_dimensions)
641
+ else:
642
+ output_dimensions = (height, width, height, width)
643
+
644
+ return hidden_states, output_dimensions, all_hidden_states
645
+
646
+
647
+ class MaskFormerSwinEncoder(nn.Module):
648
+ # Copied from transformers.models.swin.modeling_swin.SwinEncoder.__init__ with Swin->MaskFormerSwin
649
+ def __init__(self, config, grid_size):
650
+ super().__init__()
651
+ self.num_layers = len(config.depths)
652
+ self.config = config
653
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
654
+ self.layers = nn.ModuleList(
655
+ [
656
+ MaskFormerSwinStage(
657
+ config=config,
658
+ dim=int(config.embed_dim * 2**i_layer),
659
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
660
+ depth=config.depths[i_layer],
661
+ num_heads=config.num_heads[i_layer],
662
+ drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
663
+ downsample=MaskFormerSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
664
+ )
665
+ for i_layer in range(self.num_layers)
666
+ ]
667
+ )
668
+
669
+ self.gradient_checkpointing = False
670
+
671
+ def forward(
672
+ self,
673
+ hidden_states,
674
+ input_dimensions,
675
+ head_mask=None,
676
+ output_attentions=False,
677
+ output_hidden_states=False,
678
+ return_dict=True,
679
+ ):
680
+ all_hidden_states = () if output_hidden_states else None
681
+ all_input_dimensions = ()
682
+ all_self_attentions = () if output_attentions else None
683
+
684
+ if output_hidden_states:
685
+ all_hidden_states = all_hidden_states + (hidden_states,)
686
+
687
+ for i, layer_module in enumerate(self.layers):
688
+ layer_head_mask = head_mask[i] if head_mask is not None else None
689
+
690
+ if self.gradient_checkpointing and self.training:
691
+ layer_hidden_states, output_dimensions, layer_all_hidden_states = self._gradient_checkpointing_func(
692
+ layer_module.__call__,
693
+ hidden_states,
694
+ layer_head_mask,
695
+ output_attentions,
696
+ )
697
+ else:
698
+ layer_hidden_states, output_dimensions, layer_all_hidden_states = layer_module(
699
+ hidden_states,
700
+ input_dimensions,
701
+ layer_head_mask,
702
+ output_attentions,
703
+ output_hidden_states,
704
+ )
705
+
706
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
707
+ all_input_dimensions += (input_dimensions,)
708
+ if output_hidden_states:
709
+ all_hidden_states += (layer_all_hidden_states,)
710
+
711
+ hidden_states = layer_hidden_states
712
+
713
+ if output_attentions:
714
+ all_self_attentions = all_self_attentions + (layer_all_hidden_states[1],)
715
+
716
+ if not return_dict:
717
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
718
+
719
+ return MaskFormerSwinBaseModelOutput(
720
+ last_hidden_state=hidden_states,
721
+ hidden_states=all_hidden_states,
722
+ hidden_states_spatial_dimensions=all_input_dimensions,
723
+ attentions=all_self_attentions,
724
+ )
725
+
726
+
727
+ # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->MaskFormerSwin, swin->model
728
+ class MaskFormerSwinPreTrainedModel(PreTrainedModel):
729
+ """
730
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
731
+ models.
732
+ """
733
+
734
+ config_class = MaskFormerSwinConfig
735
+ base_model_prefix = "model"
736
+ main_input_name = "pixel_values"
737
+ supports_gradient_checkpointing = True
738
+
739
+ def _init_weights(self, module):
740
+ """Initialize the weights"""
741
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
742
+ # Slightly different from the TF version which uses truncated_normal for initialization
743
+ # cf https://github.com/pytorch/pytorch/pull/5617
744
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
745
+ if module.bias is not None:
746
+ module.bias.data.zero_()
747
+ elif isinstance(module, nn.LayerNorm):
748
+ module.bias.data.zero_()
749
+ module.weight.data.fill_(1.0)
750
+
751
+
752
+ class MaskFormerSwinModel(MaskFormerSwinPreTrainedModel):
753
+ def __init__(self, config, add_pooling_layer=True):
754
+ super().__init__(config)
755
+ self.config = config
756
+ self.num_layers = len(config.depths)
757
+ self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
758
+
759
+ self.embeddings = MaskFormerSwinEmbeddings(config)
760
+ self.encoder = MaskFormerSwinEncoder(config, self.embeddings.patch_grid)
761
+
762
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
763
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
764
+
765
+ def get_input_embeddings(self):
766
+ return self.embeddings.patch_embeddings
767
+
768
+ def _prune_heads(self, heads_to_prune):
769
+ """
770
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
771
+ class PreTrainedModel
772
+ """
773
+ for layer, heads in heads_to_prune.items():
774
+ self.encoder.layer[layer].attention.prune_heads(heads)
775
+
776
+ def forward(
777
+ self,
778
+ pixel_values=None,
779
+ head_mask=None,
780
+ output_attentions=None,
781
+ output_hidden_states=None,
782
+ return_dict=None,
783
+ ):
784
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
785
+ output_hidden_states = (
786
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
787
+ )
788
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
789
+
790
+ if pixel_values is None:
791
+ raise ValueError("You have to specify pixel_values")
792
+
793
+ # Prepare head mask if needed
794
+ # 1.0 in head_mask indicate we keep the head
795
+ # attention_probs has shape bsz x n_heads x N x N
796
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
797
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
798
+ head_mask = self.get_head_mask(head_mask, len(self.config.depths))
799
+
800
+ embedding_output, input_dimensions = self.embeddings(pixel_values)
801
+
802
+ encoder_outputs = self.encoder(
803
+ embedding_output,
804
+ input_dimensions,
805
+ head_mask=head_mask,
806
+ output_attentions=output_attentions,
807
+ output_hidden_states=output_hidden_states,
808
+ return_dict=return_dict,
809
+ )
810
+
811
+ sequence_output = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0]
812
+ sequence_output = self.layernorm(sequence_output)
813
+
814
+ pooled_output = None
815
+ if self.pooler is not None:
816
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
817
+ pooled_output = torch.flatten(pooled_output, 1)
818
+
819
+ if not return_dict:
820
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
821
+
822
+ hidden_states_spatial_dimensions = (input_dimensions,) + encoder_outputs.hidden_states_spatial_dimensions
823
+
824
+ return MaskFormerSwinModelOutputWithPooling(
825
+ last_hidden_state=sequence_output,
826
+ pooler_output=pooled_output,
827
+ hidden_states=encoder_outputs.hidden_states,
828
+ hidden_states_spatial_dimensions=hidden_states_spatial_dimensions,
829
+ attentions=encoder_outputs.attentions,
830
+ )
831
+
832
+
833
+ class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin):
834
+ """
835
+ MaskFormerSwin backbone, designed especially for the MaskFormer framework.
836
+
837
+ This class reshapes `hidden_states` from `(batch_size, sequence_length, hidden_size)` to `(batch_size,
838
+ num_channels, height, width)`. It also adds additional layernorms after each stage.
839
+
840
+ Args:
841
+ config (`MaskFormerSwinConfig`):
842
+ The configuration used by [`MaskFormerSwinModel`].
843
+ """
844
+
845
+ def __init__(self, config: MaskFormerSwinConfig):
846
+ super().__init__(config)
847
+ super()._init_backbone(config)
848
+
849
+ self.model = MaskFormerSwinModel(config)
850
+ if "stem" in self.out_features:
851
+ raise ValueError("This backbone does not support 'stem' in the `out_features`.")
852
+ self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]
853
+ self.hidden_states_norms = nn.ModuleList(
854
+ [nn.LayerNorm(num_channels) for num_channels in self.num_features[1:]]
855
+ )
856
+
857
+ # Initialize weights and apply final processing
858
+ self.post_init()
859
+
860
+ def forward(
861
+ self,
862
+ pixel_values: Tensor,
863
+ output_hidden_states: Optional[bool] = None,
864
+ output_attentions: Optional[bool] = None,
865
+ return_dict: Optional[bool] = None,
866
+ ) -> BackboneOutput:
867
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
868
+ output_hidden_states = (
869
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
870
+ )
871
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
872
+
873
+ outputs = self.model(
874
+ pixel_values, output_hidden_states=True, output_attentions=output_attentions, return_dict=True
875
+ )
876
+
877
+ # we skip the stem
878
+ hidden_states = outputs.hidden_states[1:]
879
+
880
+ # we need to reshape the hidden states to their original spatial dimensions
881
+ # spatial dimensions contains all the heights and widths of each stage, including after the embeddings
882
+ spatial_dimensions: Tuple[Tuple[int, int]] = outputs.hidden_states_spatial_dimensions
883
+ feature_maps = ()
884
+ for i, (hidden_state, stage, (height, width)) in enumerate(
885
+ zip(hidden_states, self.stage_names[1:], spatial_dimensions)
886
+ ):
887
+ norm = self.hidden_states_norms[i]
888
+ # the last element corresponds to the layer's last block output, before patch merging
889
+ hidden_state_unpooled = hidden_state[-1]
890
+ hidden_state_norm = norm(hidden_state_unpooled)
891
+ # the pixel decoder (FPN) expects 3D tensors (features)
892
+ batch_size, _, hidden_size = hidden_state_norm.shape
893
+ # reshape "b (h w) d -> b d h w"
894
+ hidden_state_permuted = (
895
+ hidden_state_norm.permute(0, 2, 1).view((batch_size, hidden_size, height, width)).contiguous()
896
+ )
897
+ if stage in self.out_features:
898
+ feature_maps += (hidden_state_permuted,)
899
+
900
+ if not return_dict:
901
+ output = (feature_maps,)
902
+ if output_hidden_states:
903
+ output += (outputs.hidden_states,)
904
+ if output_attentions:
905
+ output += (outputs.attentions,)
906
+ return output
907
+
908
+ return BackboneOutput(
909
+ feature_maps=feature_maps,
910
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
911
+ attentions=outputs.attentions,
912
+ )
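
A minimal usage sketch for the backbone defined in this file. Assumptions (not taken from the file above): randomly initialized weights, default `MaskFormerSwinConfig` values, a 224x224 input, and all four stages requested via `out_features`.

import torch

from transformers.models.maskformer.configuration_maskformer_swin import MaskFormerSwinConfig
from transformers.models.maskformer.modeling_maskformer_swin import MaskFormerSwinBackbone

# Randomly initialized backbone returning features from all four Swin stages ("stem" is not supported)
config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
backbone = MaskFormerSwinBackbone(config).eval()

pixel_values = torch.rand(1, config.num_channels, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)

# Each feature map is reshaped to (batch_size, num_channels, height, width) as described in the class docstring
for stage, feature_map in zip(backbone.out_features, outputs.feature_maps):
    print(stage, tuple(feature_map.shape))
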
venv/lib/python3.10/site-packages/transformers/models/mega/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
26
+ }
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_mega"] = [
35
+ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "MegaForCausalLM",
37
+ "MegaForMaskedLM",
38
+ "MegaForMultipleChoice",
39
+ "MegaForQuestionAnswering",
40
+ "MegaForSequenceClassification",
41
+ "MegaForTokenClassification",
42
+ "MegaModel",
43
+ "MegaPreTrainedModel",
44
+ ]
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
48
+
49
+ try:
50
+ if not is_torch_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .modeling_mega import (
56
+ MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
57
+ MegaForCausalLM,
58
+ MegaForMaskedLM,
59
+ MegaForMultipleChoice,
60
+ MegaForQuestionAnswering,
61
+ MegaForSequenceClassification,
62
+ MegaForTokenClassification,
63
+ MegaModel,
64
+ MegaPreTrainedModel,
65
+ )
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
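
The `_LazyModule` registration above means `modeling_mega` is only imported when one of its classes is first accessed. A minimal sketch, assuming PyTorch is installed so the optional-dependency check passes:

from transformers.models.mega import MegaConfig, MegaModel  # modeling_mega is resolved lazily at this point

config = MegaConfig()           # defaults defined in configuration_mega.py
model = MegaModel(config)       # randomly initialized Mega encoder
print(model.config.model_type)  # "mega"
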
venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc ADDED
Binary file (11.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (7.86 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc ADDED
Binary file (69.7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py ADDED
@@ -0,0 +1,242 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Mega Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MEGA configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class MegaConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
33
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
34
+ defaults will yield a similar configuration to that of the Mega
35
+ [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 30522):
43
+ Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`MegaModel`].
45
+ hidden_size (`int`, *optional*, defaults to 128):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 4):
48
+ Number of hidden layers in the Mega encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 256):
50
+ Dimensionality of the hidden size (self-attention value projection) within the Mega encoder
51
+ ema_projection_size (`int`, *optional*, defaults to 16):
52
+ Dimensionality of the MegaMultiDimensionDampedEma
53
+ bidirectional (`bool`, *optional*, defaults to `True`):
54
+ Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`)
55
+ or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be
56
+ False if you intend to use the model as a decoder.
57
+ shared_representation_size (`int`, *optional*, defaults to 64):
58
+ Dimensionality of the linear projection for shared representation of self-attention queries and keys
59
+ use_chunking (`bool`, *optional*, defaults to `False`):
60
+ Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)
61
+ chunk_size (`int`, *optional*, defaults to -1):
62
+ If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If
63
+ chunking is used, input sequences must be padded to a multiple of `chunk_size`
64
+ truncation (`int`, *optional*):
65
+ If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma
66
+ normalize_before_mega (`bool`, *optional*, defaults to `True`):
67
+ Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks
68
+ normalization_type (`str`, *optional*, defaults to `"scalenorm"`):
69
+ Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`,
70
+ `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm)
71
+ norm_affine (`bool`, *optional*, defaults to `True`):
72
+ If `True`, applies a parameterized affine transformation to inputs during normalization
73
+ activation (`str`, *optional*, defaults to `"silu"`):
74
+ Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`,
75
+ `"gelu"`, or `"gelu_accurate"`
76
+ attention_activation (`str`, *optional*, defaults to `"softmax"`):
77
+ Activation function to apply for single-headed self-attention (a la Transformer). Choose one of
78
+ `"softmax"`, `"laplace"`, or `"relu2"`
79
+ dropout_prob (`float`, *optional*, defaults to 0.1):
80
+ The dropout probability for EMA self-attention
81
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
82
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
83
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
84
+ The dropout ratio for the attention probabilities.
85
+ use_feature_dropout (`bool`, *optional*, defaults to `False`):
86
+ Whether to use feature-based (`True`) or standard dropout (`False`)
87
+ use_normalized_ffn (`bool`, *optional*, defaults to `True`):
88
+ Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output
89
+ as-is (`False`)
90
+ nffn_hidden_size (`int`, *optional*, defaults to 256):
91
+ If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this
92
+ is the hidden size of the NFFN
93
+ normalize_before_ffn (`bool`, *optional*, defaults to `True`):
94
+ Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN
95
+ nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):
96
+ The dropout ratio for the NFFN component.
97
+ max_positions (`int`, *optional*, defaults to 2048):
98
+ The maximum sequence length to use for positional representations. For `"simple"` relative positional bias,
99
+ this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to longer
100
+ sequences
101
+ add_token_type_embeddings (`bool`, *optional*, defaults to `True`):
102
+ Whether to account for token types in embeddings. Left as optional to maintain compatibility with original
103
+ implementation while adding support for token types.
104
+ type_vocab_size (`int`, *optional*, defaults to 2):
105
+ The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if
106
+ `add_token_type_embeddings = True`
107
+ initializer_range (`float`, *optional*, defaults to 0.02):
108
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
109
+ ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):
110
+ The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in
111
+ MegaMultiDimensionDampedEma.
112
+ ema_beta_range (`float`, *optional*, defaults to 0.02):
113
+ The standard deviation for initializing the beta parameter (expansion matrix) in
114
+ MegaMultiDimensionDampedEma.
115
+ ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):
116
+ The standard deviation for initializing the gamma (projection matrix) and omega (residual weight)
117
+ parameters in MultiDimensionEMA.
118
+ relative_positional_bias (`str`, *optional*, defaults to `"rotary"`):
119
+ Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected,
120
+ `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`.
121
+ is_decoder (`bool`, *optional*, defaults to `False`):
122
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
123
+ use_cache (`bool`, *optional*, defaults to `True`):
124
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
125
+ relevant if `config.is_decoder=True`.
126
+ classifier_dropout (`float`, *optional*):
127
+ The dropout ratio for the classification head.
128
+ add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):
129
+ Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass
130
+ hidden states directly to LM head (`False`). Remains optional for compatibility with original
131
+ implementation
132
+
133
+ Examples:
134
+
135
+ ```python
136
+ >>> from transformers import MegaConfig, MegaModel
137
+
138
+ >>> # Initializing a Mega configuration
139
+ >>> configuration = MegaConfig()
140
+
141
+ >>> # Initializing a model (with random weights) from the configuration
142
+ >>> model = MegaModel(configuration)
143
+
144
+ >>> # Accessing the model configuration
145
+ >>> configuration = model.config
146
+ ```"""
147
+
148
+ model_type = "mega"
149
+
150
+ def __init__(
151
+ self,
152
+ vocab_size=30522,
153
+ hidden_size=128,
154
+ num_hidden_layers=4,
155
+ intermediate_size=256,
156
+ ema_projection_size=16,
157
+ bidirectional=True,
158
+ shared_representation_size=64,
159
+ use_chunking=False,
160
+ chunk_size=-1,
161
+ truncation=None,
162
+ normalize_before_mega=True,
163
+ normalization_type="scalenorm",
164
+ norm_affine=True,
165
+ activation="silu",
166
+ attention_activation="softmax",
167
+ dropout_prob=0.1,
168
+ hidden_dropout_prob=0.1,
169
+ attention_probs_dropout_prob=0.1,
170
+ use_feature_dropout=False,
171
+ use_normalized_ffn=True,
172
+ nffn_hidden_size=256,
173
+ normalize_before_ffn=True,
174
+ nffn_activation_dropout_prob=0.1,
175
+ max_positions=2048,
176
+ add_token_type_embeddings=False,
177
+ type_vocab_size=2,
178
+ initializer_range=0.02,
179
+ ema_delta_alpha_range=0.2,
180
+ ema_beta_range=0.02,
181
+ ema_gamma_omega_range=1.0,
182
+ pad_token_id=1,
183
+ bos_token_id=0,
184
+ eos_token_id=2,
185
+ relative_positional_bias="rotary",
186
+ classifier_dropout=None,
187
+ use_cache=True,
188
+ add_lm_hidden_dense_layer=True,
189
+ **kwargs,
190
+ ):
191
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
192
+
193
+ self.vocab_size = vocab_size
194
+ self.hidden_size = hidden_size
195
+ self.num_hidden_layers = num_hidden_layers
196
+ self.activation = activation
197
+ self.attention_activation = attention_activation
198
+ self.intermediate_size = intermediate_size
199
+ self.ema_projection_size = ema_projection_size
200
+ self.bidirectional = bidirectional
201
+ self.shared_representation_size = shared_representation_size
202
+ self.use_chunking = use_chunking
203
+ self.chunk_size = chunk_size
204
+ self.truncation = truncation
205
+ self.normalize_before_mega = normalize_before_mega
206
+ self.normalization_type = normalization_type
207
+ self.norm_affine = norm_affine
208
+ self.dropout_prob = dropout_prob
209
+ self.hidden_dropout_prob = hidden_dropout_prob
210
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
211
+ self.use_feature_dropout = use_feature_dropout
212
+ self.use_normalized_ffn = use_normalized_ffn
213
+ self.nffn_hidden_size = nffn_hidden_size
214
+ self.normalize_before_ffn = normalize_before_ffn
215
+ self.nffn_activation_dropout_prob = nffn_activation_dropout_prob
216
+ self.max_positions = max_positions
217
+ self.add_token_type_embeddings = add_token_type_embeddings
218
+ self.type_vocab_size = type_vocab_size
219
+ self.initializer_range = initializer_range
220
+ self.ema_delta_alpha_range = ema_delta_alpha_range
221
+ self.ema_beta_range = ema_beta_range
222
+ self.ema_gamma_omega_range = ema_gamma_omega_range
223
+ self.relative_positional_bias = relative_positional_bias
224
+ self.use_cache = use_cache
225
+ self.classifier_dropout = classifier_dropout
226
+ self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer
227
+ self.num_attention_heads = 1 # not used but required by Hugging Face
228
+
229
+
230
+ class MegaOnnxConfig(OnnxConfig):
231
+ @property
232
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
233
+ if self.task == "multiple-choice":
234
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
235
+ else:
236
+ dynamic_axis = {0: "batch", 1: "sequence"}
237
+ return OrderedDict(
238
+ [
239
+ ("input_ids", dynamic_axis),
240
+ ("attention_mask", dynamic_axis),
241
+ ]
242
+ )
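The docstring example above only covers the defaults. As a purely illustrative sketch (the parameter names are taken from the docstring above; the specific values are made up), here is how the chunking and positional-bias options might be combined:

```python
from transformers import MegaConfig, MegaModel

# Illustrative, non-default values: enable chunked attention and the
# "simple" relative positional bias described in the docstring above.
configuration = MegaConfig(
    use_chunking=True,
    chunk_size=128,
    relative_positional_bias="simple",
    max_positions=1024,  # with "simple" bias this acts as a hard limit on input length
)

model = MegaModel(configuration)
print(model.config.relative_positional_bias)  # "simple"
```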
venv/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,291 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Convert Mega pretrained checkpoint. Built to convert the Masked LM checkpoint located at
18
+ https://huggingface.co/mnaylor/mega-wikitext-103
19
+
20
+ Requirements:
21
+ - clone the Mega repo and install fairseq from there
22
+ 1. git clone https://github.com/facebookresearch/mega.git
23
+ 2. cd mega && pip install -e .
24
+ - clone the pretrained weights for the original implementation from the hugging face repo
25
+ * use this location as the path for pretrained weights
26
+ """
27
+ import argparse
28
+
29
+ # utilities to import the model weights and config file
30
+ import os
31
+ import pickle as pkl
32
+
33
+ # PyTorch + new model classes
34
+ import torch
35
+ from torch import nn
36
+
37
+ from transformers import AutoTokenizer, MegaConfig, MegaForMaskedLM
38
+
39
+
40
+ # import the EncoderLayer class used to pretrain
41
+ # !! NOTE !! this requires the version of fairseq that is built when you install the Mega source
42
+ try:
43
+ from fairseq.modules.mega_layer import MegaEncoderLayer
44
+ except ImportError:
45
+ raise ImportError("You need to install the version of fairseq from the Mega repo!")
46
+
47
+
48
+ # define the wrapper classes used to train the MLM (see colab notebook below)
49
+ # https://colab.research.google.com/drive/1qfUO6o5HRdxBblWlw058HVyvaEPhPpH8?usp=sharing
50
+ # MegaLM outputs hidden states
51
+ class MegaLM(nn.Module):
52
+ "The base class for our Mega encoder - given input IDs, embed text and return encoder output"
53
+
54
+ def __init__(self, mega_args, depth, vocab_size):
55
+ super().__init__()
56
+ self.mega_args = mega_args
57
+ self.embedding_layer = nn.Embedding(vocab_size, self.mega_args.encoder_embed_dim)
58
+ self.encoders = nn.ModuleList([MegaEncoderLayer(self.mega_args) for _ in range(depth)])
59
+ self.depth = depth
60
+
61
+ def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
62
+ """
63
+ Code for a forward pass - expects input_ids and attention_mask to come from a Hugging Face tokenizer as PyTorch
64
+ tensors, and returns the Mega encoder hidden states as a tensor of shape (batch, time, hidden size) when `batch_first=True`
65
+
66
+ Other options:
67
+ - batch_first: boolean indicating whether the batch dimension is first in input_ids (default: True, which
68
+ aligns with the HF tokenizer behavior)
69
+ - ignore_mask_value: the value in attention_mask that identifies tokens that should be ignored (default: 0,
70
+ which aligns with HF tokenizer)
71
+ """
72
+
73
+ # Mega expects embeddings to be (time, batch, embedding size), but
74
+ # Hugging Face returns tokens as (batch, time)
75
+ if batch_first:
76
+ input_ids = input_ids.T
77
+
78
+ # to make things more confusing, Mega expects the attention mask to
79
+ # be (batch, time), but with values of 0 (normal token) and 1 (ignore token)
80
+ # which is the opposite of what HF returns
81
+ if ignore_mask_value == 0:
82
+ attention_mask = 1 - attention_mask
83
+
84
+ # get token embeddings from IDs
85
+ embeds = self.embedding_layer(input_ids)
86
+
87
+ # pass through the Mega layers
88
+ # input is (time, batch, encoder dim) and output is the same
89
+ for encoder in self.encoders:
90
+ embeds = encoder(embeds, attention_mask)
91
+
92
+ # return according to the shape specified
93
+ if batch_first:
94
+ # (T, B, H) --> (B, T, H)
95
+ return torch.transpose(embeds, 0, 1)
96
+ else:
97
+ return embeds
98
+
99
+
100
+ # renamed from MegaForMaskedLM to avoid confusion with new module
101
+ class OriginalMegaForMaskedLM(nn.Module):
102
+ "A wrapper class for doing masked language modeling with Mega"
103
+
104
+ def __init__(self, mega_args, depth, vocab_size):
105
+ super().__init__()
106
+ self.mega = MegaLM(mega_args, depth, vocab_size)
107
+ self.mlm_head = nn.Linear(mega_args.encoder_embed_dim, vocab_size)
108
+ self.dropout = nn.Dropout(p=0.1)
109
+
110
+ def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
111
+ """
112
+ Perform a forward pass through the Mega encoder and the masked LM head. Returns logits for each vocabulary
113
+ entry.
114
+
115
+ If `batch_first` (default to align with Hugging Face tokenizer behavior), output will have the shape (Batch
116
+ size, Sequence length, Vocab size); otherwise (S, B, V)
117
+ """
118
+ encoder_output = self.mega(input_ids, attention_mask, batch_first, ignore_mask_value)
119
+ return self.mlm_head(self.dropout(encoder_output))
120
+
121
+
122
+ # code to convert the checkpoint located in the user-specified location
123
+ def convert_checkpoint_to_huggingface(pretrained_checkpoint_path, output_path, includes_tokenizer):
124
+ with open(os.path.join(pretrained_checkpoint_path, "model_args.pkl"), "rb") as f:
125
+ mega_original_args = pkl.load(f)
126
+
127
+ # load the original encoder
128
+ original_mlm = OriginalMegaForMaskedLM(**mega_original_args).eval()
129
+
130
+ # load its weights
131
+ print(
132
+ "Original Mega encoder:",
133
+ original_mlm.mega.load_state_dict(
134
+ torch.load(os.path.join(pretrained_checkpoint_path, "encoder_weights.pt"), map_location="cpu")
135
+ ),
136
+ )
137
+ print(
138
+ "Original Mega MLM layer:",
139
+ original_mlm.mlm_head.load_state_dict(
140
+ torch.load(os.path.join(pretrained_checkpoint_path, "mlm_head_weights.pt"), map_location="cpu")
141
+ ),
142
+ )
143
+
144
+ # create a new config from the old one
145
+ hf_config = MegaConfig(
146
+ num_hidden_layers=mega_original_args["depth"],
147
+ vocab_size=mega_original_args["vocab_size"],
148
+ hidden_size=mega_original_args["mega_args"].encoder_embed_dim,
149
+ shared_representation_size=mega_original_args["mega_args"].encoder_z_dim,
150
+ intermediate_size=mega_original_args["mega_args"].encoder_hidden_dim,
151
+ ema_projection_size=mega_original_args["mega_args"].encoder_n_dim,
152
+ dropout_prob=mega_original_args["mega_args"].dropout,
153
+ attention_probs_dropout_prob=mega_original_args["mega_args"].attention_dropout,
154
+ hidden_dropout_prob=mega_original_args["mega_args"].hidden_dropout,
155
+ activation=mega_original_args["mega_args"].activation_fn,
156
+ attention_activation=mega_original_args["mega_args"].attention_activation_fn,
157
+ bidirectional=mega_original_args["mega_args"].bidirectional,
158
+ use_chunking=mega_original_args["mega_args"].encoder_chunk_size > 0,
159
+ chunk_size=mega_original_args["mega_args"].encoder_chunk_size,
160
+ truncation=mega_original_args["mega_args"].truncation_length,
161
+ normalization_type=mega_original_args["mega_args"].normalization_type,
162
+ normalize_before_mega=True,
163
+ norm_affine=True,
164
+ use_feature_dropout=mega_original_args["mega_args"].feature_dropout,
165
+ relative_positional_bias=mega_original_args["mega_args"].rel_pos_bias,
166
+ max_positions=mega_original_args["mega_args"].max_source_positions,
167
+ nffn_hidden_size=mega_original_args["mega_args"].encoder_ffn_embed_dim,
168
+ normalize_before_ffn=mega_original_args["mega_args"].normalize_before,
169
+ # new arguments added for HF implementation
170
+ nffn_activation_dropout_prob=0.0,
171
+ add_token_type_embeddings=False,
172
+ add_lm_hidden_dense_layer=False,
173
+ )
174
+
175
+ hf_mlm = MegaForMaskedLM(hf_config).eval()
176
+
177
+ # the original checkpoint just uses nn.Embedding for the word embeddings
178
+ # we use a wrapper module for embeddings to add support for positional embeddings
179
+ hf_mlm.mega.embedding_layer.word_embeddings.weight = original_mlm.mega.embedding_layer.weight
180
+
181
+ # modify the state dictionary of the original checkpoint to account for naming issues in the Hugging Face
182
+ # ecosystem -- any names containing "beta" or "gamma" aren't safe to use and are renamed upon _load_pretrained,
183
+ # also renaming previously confusing parameter names
184
+ original_state_dict = original_mlm.mega.encoders.state_dict()
185
+ updated_keys = {}
186
+ for module_name in original_state_dict.keys():
187
+ new_module_name = None
188
+ # have to handle gamma, beta, and alpha differently due to their use
189
+ # in multiple modules within the original repository;
190
+ # beta is used in EMA, MovingAverageGatedAttention, and RotaryRelativePositionalBias, and must be renamed due to flax/tf weights
191
+ # the EMA sublayer was renamed from "move" to "ema_gate" for readability, so that is also done here
192
+ if "beta" in module_name:
193
+ # EMA sub-layers were always called "move" in the original repo
194
+ if "move.beta" in module_name:
195
+ new_module_name = module_name.replace("move.beta", "ema_gate.ema_expansion_matrix")
196
+ elif "mega_layer.beta" in module_name:
197
+ new_module_name = module_name.replace("beta", "qk_bias")
198
+ else:
199
+ new_module_name = module_name.replace("beta", "b_param")
200
+ # gamma is used in EMA and MovingAverageGatedAttention, and must be renamed due to flax/tf weights
201
+ elif "gamma" in module_name:
202
+ if "move.gamma" in module_name:
203
+ new_module_name = module_name.replace("move.gamma", "ema_gate.kernel_projection_matrix")
204
+ elif "mega_layer.gamma" in module_name:
205
+ new_module_name = module_name.replace("gamma", "qk_weight")
206
+ else:
207
+ new_module_name = module_name.replace("gamma", "g_param")
208
+ # alpha is used in EMA and positional bias; renaming to improve readability
209
+ elif "move.alpha" in module_name:
210
+ new_module_name = module_name.replace("move.alpha", "ema_gate.decay_factor")
211
+ # delta is only used in EMA; renaming to improve readability
212
+ elif "move.delta" in module_name:
213
+ new_module_name = module_name.replace("move.delta", "ema_gate.damping_factor")
214
+ # omega is only used in EMA; renaming to improve readability
215
+ elif "omega" in module_name:
216
+ new_module_name = module_name.replace("move.omega", "ema_gate.residual_weight")
217
+
218
+ if new_module_name:
219
+ updated_keys[module_name] = new_module_name
220
+
221
+ if len(updated_keys) != 0:
222
+ print(f"Renaming these keys: {updated_keys.keys()}")
223
+ else:
224
+ print("No need to rename state dict entries")
225
+ for old, new in updated_keys.items():
226
+ original_state_dict[new] = original_state_dict.pop(old)
227
+
228
+ # now attempt to load the state dictionary with updated names
229
+ # note that we now call it `mega.layers` instead of `mega.encoders` due to hugging face style
230
+ print("HF Mega encoder:", hf_mlm.mega.layers.load_state_dict(original_state_dict))
231
+
232
+ # load the MLM head weights directly
233
+ print(
234
+ "HF Mega MLM layer:",
235
+ hf_mlm.mlm_head.load_state_dict(
236
+ torch.load(os.path.join(pretrained_checkpoint_path, "mlm_head_weights.pt"), map_location="cpu")
237
+ ),
238
+ )
239
+
240
+ # test on a randomly generated input sequence
241
+ input_ids = torch.randint(0, hf_config.vocab_size, size=(4, 256))
242
+ input_mask = torch.ones_like(input_ids)
243
+ # mask a few tokens to make sure masking is applied appropriately :)
244
+ input_mask[:, -10:] = 0
245
+
246
+ # run forward passes
247
+ original_output = original_mlm(input_ids, input_mask, batch_first=True, ignore_mask_value=0)
248
+ hf_output = hf_mlm(input_ids, input_mask)[0]
249
+
250
+ # print shapes and diff
251
+ print(f"original output {original_output.shape}")
252
+ print(f"hf output {hf_output.shape}")
253
+ print(f"max diff: {(original_output - hf_output).max()}") # 0.0
254
+ success = torch.allclose(original_output, hf_output, atol=1e-3)
255
+
256
+ if success:
257
+ print("Yay!")
258
+ hf_mlm.save_pretrained(output_path)
259
+ else:
260
+ raise RuntimeError(f"Something's broken :(\nOriginal:\n{original_output}\n\nHF\n{hf_output}\n{hf_mlm}")
261
+
262
+ if includes_tokenizer:
263
+ print("Transferring tokenizer")
264
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_checkpoint_path)
265
+ tokenizer.save_pretrained(output_path)
266
+
267
+
268
+ if __name__ == "__main__":
269
+ parser = argparse.ArgumentParser()
270
+
271
+ parser.add_argument(
272
+ "--pretrained_checkpoint_path",
273
+ default=None,
274
+ type=str,
275
+ required=True,
276
+ help="Point to the directory containing your model weights using the official Mega repo",
277
+ )
278
+
279
+ parser.add_argument(
280
+ "--output_path", default=None, type=str, required=True, help="Location to save the Hugging Face version"
281
+ )
282
+
283
+ parser.add_argument(
284
+ "--includes_tokenizer",
285
+ action="store_true",
286
+ help="Use this flag if there is a Hugging Face tokenizer in the original checkpoint repo",
287
+ )
288
+
289
+ args = parser.parse_args()
290
+
291
+ convert_checkpoint_to_huggingface(args.pretrained_checkpoint_path, args.output_path, args.includes_tokenizer)
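For reference, a hedged usage sketch of the conversion script above. It assumes the Mega fork of fairseq is installed (otherwise the import guard near the top of the file raises `ImportError`) and uses hypothetical local paths; the function name and arguments come straight from the script:

```python
# Hypothetical paths; requires fairseq installed from the Mega repo.
from transformers.models.mega.convert_mega_original_pytorch_checkpoint_to_pytorch import (
    convert_checkpoint_to_huggingface,
)

convert_checkpoint_to_huggingface(
    pretrained_checkpoint_path="./mega-wikitext-103",  # hypothetical clone of the original checkpoint
    output_path="./hf-mega",                           # hypothetical output directory
    includes_tokenizer=True,  # only if the checkpoint repo ships a Hugging Face tokenizer
)
```

Equivalently, the script can be run from the command line with the `--pretrained_checkpoint_path`, `--output_path`, and `--includes_tokenizer` flags defined in the argparse block.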
venv/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_oneformer": ["ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "OneFormerConfig"],
21
+ "processing_oneformer": ["OneFormerProcessor"],
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["image_processing_oneformer"] = ["OneFormerImageProcessor"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_oneformer"] = [
39
+ "ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "OneFormerForUniversalSegmentation",
41
+ "OneFormerModel",
42
+ "OneFormerPreTrainedModel",
43
+ ]
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_oneformer import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, OneFormerConfig
47
+ from .processing_oneformer import OneFormerProcessor
48
+
49
+ try:
50
+ if not is_vision_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .image_processing_oneformer import OneFormerImageProcessor
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .modeling_oneformer import (
63
+ ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ OneFormerForUniversalSegmentation,
65
+ OneFormerModel,
66
+ OneFormerPreTrainedModel,
67
+ )
68
+
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
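The `__init__.py` above follows the library's lazy-import pattern: `_import_structure` maps each submodule to the names it exports, and at import time the module object is replaced by a `_LazyModule`. A deliberately simplified, self-contained sketch of that idea (not the actual `_LazyModule` implementation) is shown below:

```python
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Toy sketch: resolve exported names only on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute name -> fully qualified module that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


# "json" is imported only when JSONDecoder is first accessed.
lazy = LazyModule("demo", {"json": ["JSONDecoder"]})
decoder = lazy.JSONDecoder()
```

The real `_LazyModule` adds more machinery, but deferring the `importlib.import_module` call until first attribute access is the core mechanism.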
venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc ADDED
Binary file (31.7 kB). View file