applied-ai-018 committed
Commit 4776532 · verified
1 Parent(s): 0be9bb4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/transformers/models/__init__.py +273 -0
  5. venv/lib/python3.10/site-packages/transformers/models/bit/__init__.py +73 -0
  6. venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py +136 -0
  12. venv/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py +178 -0
  13. venv/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py +345 -0
  14. venv/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py +898 -0
  15. venv/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py +61 -0
  16. venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py +175 -0
  21. venv/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py +287 -0
  22. venv/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py +856 -0
  23. venv/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py +166 -0
  24. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py +140 -0
  32. venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py +1384 -0
  33. venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py +895 -0
  34. venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py +1139 -0
  35. venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py +514 -0
  36. venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py +176 -0
  37. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py +82 -0
  38. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py +106 -0
  44. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py +693 -0
  45. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py +899 -0
  46. venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +663 -0
  47. venv/lib/python3.10/site-packages/transformers/models/ibert/__init__.py +62 -0
  48. venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/configuration_ibert.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/modeling_ibert.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c58d9a4ba78bf6b0804db683329e66eb8ae93b01ff31f4f020feb4fad4514ee
+size 33555612
ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce326dc8c6d418331420b423287b45bfa55d60a768a3d87f44baf2125a288035
+size 33555627
ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:058ce3d1d39eba8b80c35b26ea61854f1330e05b249b0e8edb3e8a41ed44893d
+size 33555533
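
The three files above are Git LFS pointer stubs for optimizer state (Adam first and second moments) and fp32 master weights of the `13.mlp.dense_h_to_4h_swiglu.weight` parameter. A minimal sketch of inspecting them, assuming `git lfs pull` has materialized the real `.pt` payloads and PyTorch is installed; the exact payload type (raw tensor vs. dict) depends on how the checkpoint was written, so the print hedges on that:

# Sketch only: paths mirror the checkpoint layout committed above.
import torch

base = "ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight"
tensors = {
    "exp_avg": torch.load(f"{base}/exp_avg.pt", map_location="cpu"),       # Adam first moment
    "exp_avg_sq": torch.load(f"{base}/exp_avg_sq.pt", map_location="cpu"), # Adam second moment
    "fp32": torch.load(f"{base}/fp32.pt", map_location="cpu"),             # fp32 master weights
}
for name, obj in tensors.items():
    # Objects may be tensors or wrapper dicts depending on the checkpoint writer.
    print(name, getattr(obj, "shape", type(obj)))
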
venv/lib/python3.10/site-packages/transformers/models/__init__.py ADDED
@@ -0,0 +1,273 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (
+    albert,
+    align,
+    altclip,
+    audio_spectrogram_transformer,
+    auto,
+    autoformer,
+    bark,
+    bart,
+    barthez,
+    bartpho,
+    beit,
+    bert,
+    bert_generation,
+    bert_japanese,
+    bertweet,
+    big_bird,
+    bigbird_pegasus,
+    biogpt,
+    bit,
+    blenderbot,
+    blenderbot_small,
+    blip,
+    blip_2,
+    bloom,
+    bridgetower,
+    bros,
+    byt5,
+    camembert,
+    canine,
+    chinese_clip,
+    clap,
+    clip,
+    clipseg,
+    clvp,
+    code_llama,
+    codegen,
+    cohere,
+    conditional_detr,
+    convbert,
+    convnext,
+    convnextv2,
+    cpm,
+    cpmant,
+    ctrl,
+    cvt,
+    data2vec,
+    dbrx,
+    deberta,
+    deberta_v2,
+    decision_transformer,
+    deformable_detr,
+    deit,
+    deprecated,
+    depth_anything,
+    deta,
+    detr,
+    dialogpt,
+    dinat,
+    dinov2,
+    distilbert,
+    dit,
+    donut,
+    dpr,
+    dpt,
+    efficientformer,
+    efficientnet,
+    electra,
+    encodec,
+    encoder_decoder,
+    ernie,
+    ernie_m,
+    esm,
+    falcon,
+    fastspeech2_conformer,
+    flaubert,
+    flava,
+    fnet,
+    focalnet,
+    fsmt,
+    funnel,
+    fuyu,
+    gemma,
+    git,
+    glpn,
+    gpt2,
+    gpt_bigcode,
+    gpt_neo,
+    gpt_neox,
+    gpt_neox_japanese,
+    gpt_sw3,
+    gptj,
+    gptsan_japanese,
+    graphormer,
+    grounding_dino,
+    groupvit,
+    herbert,
+    hubert,
+    ibert,
+    idefics,
+    idefics2,
+    imagegpt,
+    informer,
+    instructblip,
+    jamba,
+    jukebox,
+    kosmos2,
+    layoutlm,
+    layoutlmv2,
+    layoutlmv3,
+    layoutxlm,
+    led,
+    levit,
+    lilt,
+    llama,
+    llava,
+    llava_next,
+    longformer,
+    longt5,
+    luke,
+    lxmert,
+    m2m_100,
+    mamba,
+    marian,
+    markuplm,
+    mask2former,
+    maskformer,
+    mbart,
+    mbart50,
+    mega,
+    megatron_bert,
+    megatron_gpt2,
+    mgp_str,
+    mistral,
+    mixtral,
+    mluke,
+    mobilebert,
+    mobilenet_v1,
+    mobilenet_v2,
+    mobilevit,
+    mobilevitv2,
+    mpnet,
+    mpt,
+    mra,
+    mt5,
+    musicgen,
+    musicgen_melody,
+    mvp,
+    nat,
+    nezha,
+    nllb,
+    nllb_moe,
+    nougat,
+    nystromformer,
+    olmo,
+    oneformer,
+    openai,
+    opt,
+    owlv2,
+    owlvit,
+    patchtsmixer,
+    patchtst,
+    pegasus,
+    pegasus_x,
+    perceiver,
+    persimmon,
+    phi,
+    phobert,
+    pix2struct,
+    plbart,
+    poolformer,
+    pop2piano,
+    prophetnet,
+    pvt,
+    pvt_v2,
+    qdqbert,
+    qwen2,
+    qwen2_moe,
+    rag,
+    realm,
+    recurrent_gemma,
+    reformer,
+    regnet,
+    rembert,
+    resnet,
+    roberta,
+    roberta_prelayernorm,
+    roc_bert,
+    roformer,
+    rwkv,
+    sam,
+    seamless_m4t,
+    seamless_m4t_v2,
+    segformer,
+    seggpt,
+    sew,
+    sew_d,
+    siglip,
+    speech_encoder_decoder,
+    speech_to_text,
+    speech_to_text_2,
+    speecht5,
+    splinter,
+    squeezebert,
+    stablelm,
+    starcoder2,
+    superpoint,
+    swiftformer,
+    swin,
+    swin2sr,
+    swinv2,
+    switch_transformers,
+    t5,
+    table_transformer,
+    tapas,
+    time_series_transformer,
+    timesformer,
+    timm_backbone,
+    trocr,
+    tvlt,
+    tvp,
+    udop,
+    umt5,
+    unispeech,
+    unispeech_sat,
+    univnet,
+    upernet,
+    videomae,
+    vilt,
+    vipllava,
+    vision_encoder_decoder,
+    vision_text_dual_encoder,
+    visual_bert,
+    vit,
+    vit_hybrid,
+    vit_mae,
+    vit_msn,
+    vitdet,
+    vitmatte,
+    vits,
+    vivit,
+    wav2vec2,
+    wav2vec2_bert,
+    wav2vec2_conformer,
+    wav2vec2_phoneme,
+    wav2vec2_with_lm,
+    wavlm,
+    whisper,
+    x_clip,
+    xglm,
+    xlm,
+    xlm_prophetnet,
+    xlm_roberta,
+    xlm_roberta_xl,
+    xlnet,
+    xmod,
+    yolos,
+    yoso,
+)
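
This `__init__.py` only registers the per-model subpackages, so any of the listed names can be imported as an attribute of `transformers.models`. A small sketch (bit and dinov2 used as illustrative examples):

# Sketch: each listed name is a lazily loaded subpackage of transformers.models.
from transformers.models import bit, dinov2

print(bit.BitConfig().model_type)        # "bit"
print(dinov2.Dinov2Config().model_type)  # "dinov2"
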
venv/lib/python3.10/site-packages/transformers/models/bit/__init__.py ADDED
@@ -0,0 +1,73 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_bit"] = [
+        "BIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "BitForImageClassification",
+        "BitModel",
+        "BitPreTrainedModel",
+        "BitBackbone",
+    ]
+
+
+try:
+    if not is_vision_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["image_processing_bit"] = ["BitImageProcessor"]
+
+
+if TYPE_CHECKING:
+    from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_bit import (
+            BIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            BitBackbone,
+            BitForImageClassification,
+            BitModel,
+            BitPreTrainedModel,
+        )
+
+    try:
+        if not is_vision_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .image_processing_bit import BitImageProcessor
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
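
The file above wires up the usual transformers lazy-import pattern: at runtime the package is replaced by a `_LazyModule`, so heavy dependencies load only when their symbols are first accessed. A short sketch of that behaviour (assumes transformers is installed; the torch-backed class is only resolved on access):

# Sketch: importing the config does not pull in torch; model classes resolve lazily.
from transformers.models.bit import BitConfig

config = BitConfig(layer_type="bottleneck", global_padding="same")
print(config.layer_type, config.global_padding)  # bottleneck SAME

from transformers.models.bit import BitModel  # requires torch to be installed
print(BitModel.__name__)
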
venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB)

venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc ADDED
Binary file (5.57 kB)

venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc ADDED
Binary file (4.51 kB)

venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc ADDED
Binary file (13.2 kB)

venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc ADDED
Binary file (23.8 kB)
 
venv/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py ADDED
@@ -0,0 +1,136 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BiT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class BitConfig(BackboneConfigMixin, PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate a BiT
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the BiT
+    [google/bit-50](https://huggingface.co/google/bit-50) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        num_channels (`int`, *optional*, defaults to 3):
+            The number of input channels.
+        embedding_size (`int`, *optional*, defaults to 64):
+            Dimensionality (hidden size) for the embedding layer.
+        hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
+            Dimensionality (hidden size) at each stage.
+        depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
+            Depth (number of layers) for each stage.
+        layer_type (`str`, *optional*, defaults to `"preactivation"`):
+            The layer to use, it can be either `"preactivation"` or `"bottleneck"`.
+        hidden_act (`str`, *optional*, defaults to `"relu"`):
+            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
+            are supported.
+        global_padding (`str`, *optional*):
+            Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
+        num_groups (`int`, *optional*, defaults to 32):
+            Number of groups used for the `BitGroupNormActivation` layers.
+        drop_path_rate (`float`, *optional*, defaults to 0.0):
+            The drop path rate for the stochastic depth.
+        embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
+            Whether or not to make use of dynamic padding for the embedding layer.
+        output_stride (`int`, *optional*, defaults to 32):
+            The output stride of the model.
+        width_factor (`int`, *optional*, defaults to 1):
+            The width factor for the model.
+        out_features (`List[str]`, *optional*):
+            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+            same order as defined in the `stage_names` attribute.
+        out_indices (`List[int]`, *optional*):
+            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+            If unset and `out_features` is unset, will default to the last stage. Must be in the
+            same order as defined in the `stage_names` attribute.
+
+    Example:
+    ```python
+    >>> from transformers import BitConfig, BitModel
+
+    >>> # Initializing a BiT bit-50 style configuration
+    >>> configuration = BitConfig()
+
+    >>> # Initializing a model (with random weights) from the bit-50 style configuration
+    >>> model = BitModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```
+    """
+
+    model_type = "bit"
+    layer_types = ["preactivation", "bottleneck"]
+    supported_padding = ["SAME", "VALID"]
+
+    def __init__(
+        self,
+        num_channels=3,
+        embedding_size=64,
+        hidden_sizes=[256, 512, 1024, 2048],
+        depths=[3, 4, 6, 3],
+        layer_type="preactivation",
+        hidden_act="relu",
+        global_padding=None,
+        num_groups=32,
+        drop_path_rate=0.0,
+        embedding_dynamic_padding=False,
+        output_stride=32,
+        width_factor=1,
+        out_features=None,
+        out_indices=None,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        if layer_type not in self.layer_types:
+            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
+        if global_padding is not None:
+            if global_padding.upper() in self.supported_padding:
+                global_padding = global_padding.upper()
+            else:
+                raise ValueError(f"Padding strategy {global_padding} not supported")
+        self.num_channels = num_channels
+        self.embedding_size = embedding_size
+        self.hidden_sizes = hidden_sizes
+        self.depths = depths
+        self.layer_type = layer_type
+        self.hidden_act = hidden_act
+        self.global_padding = global_padding
+        self.num_groups = num_groups
+        self.drop_path_rate = drop_path_rate
+        self.embedding_dynamic_padding = embedding_dynamic_padding
+        self.output_stride = output_stride
+        self.width_factor = width_factor
+
+        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
+        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+        )
+
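
Beyond the plain-classification example in the docstring, the `out_features`/`out_indices` machinery above is what makes `BitConfig` usable as a backbone config. A small sketch (values are illustrative):

# Sketch: stage names are derived from `depths`; requested features are aligned to indices.
from transformers import BitConfig

config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # e.g. [2, 4]
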
venv/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py ADDED
@@ -0,0 +1,178 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert BiT checkpoints from the timm library."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from timm import create_model
+from timm.data import resolve_data_config
+from timm.data.transforms_factory import create_transform
+
+from transformers import BitConfig, BitForImageClassification, BitImageProcessor
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_config(model_name):
+    repo_id = "huggingface/label-files"
+    filename = "imagenet-1k-id2label.json"
+    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+    id2label = {int(k): v for k, v in id2label.items()}
+    label2id = {v: k for k, v in id2label.items()}
+
+    conv_layer = "std_conv" if "bit" in model_name else False
+
+    # note that when using BiT as backbone for ViT-hybrid checkpoints,
+    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
+    # config.conv_layer = "std_conv_same"
+    config = BitConfig(
+        conv_layer=conv_layer,
+        num_labels=1000,
+        id2label=id2label,
+        label2id=label2id,
+    )
+
+    return config
+
+
+def rename_key(name):
+    if "stem.conv" in name:
+        name = name.replace("stem.conv", "bit.embedder.convolution")
+    if "blocks" in name:
+        name = name.replace("blocks", "layers")
+    if "head.fc" in name:
+        name = name.replace("head.fc", "classifier.1")
+    if name.startswith("norm"):
+        name = "bit." + name
+    if "bit" not in name and "classifier" not in name:
+        name = "bit.encoder." + name
+
+    return name
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    im = Image.open(requests.get(url, stream=True).raw)
+    return im
+
+
+@torch.no_grad()
+def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
+    """
+    Copy/paste/tweak model's weights to our BiT structure.
+    """
+
+    # define default BiT configuration
+    config = get_config(model_name)
+
+    # load original model from timm
+    timm_model = create_model(model_name, pretrained=True)
+    timm_model.eval()
+
+    # load state_dict of original model
+    state_dict = timm_model.state_dict()
+    for key in state_dict.copy().keys():
+        val = state_dict.pop(key)
+        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
+
+    # load HuggingFace model
+    model = BitForImageClassification(config)
+    model.eval()
+    model.load_state_dict(state_dict)
+
+    # create image processor
+    transform = create_transform(**resolve_data_config({}, model=timm_model))
+    timm_transforms = transform.transforms
+
+    pillow_resamplings = {
+        "bilinear": PILImageResampling.BILINEAR,
+        "bicubic": PILImageResampling.BICUBIC,
+        "nearest": PILImageResampling.NEAREST,
+    }
+
+    processor = BitImageProcessor(
+        do_resize=True,
+        size={"shortest_edge": timm_transforms[0].size},
+        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
+        do_center_crop=True,
+        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
+        do_normalize=True,
+        image_mean=timm_transforms[-1].mean.tolist(),
+        image_std=timm_transforms[-1].std.tolist(),
+    )
+
+    image = prepare_img()
+    timm_pixel_values = transform(image).unsqueeze(0)
+    pixel_values = processor(image, return_tensors="pt").pixel_values
+
+    # verify pixel values
+    assert torch.allclose(timm_pixel_values, pixel_values)
+
+    # verify logits
+    with torch.no_grad():
+        outputs = model(pixel_values)
+        logits = outputs.logits
+
+    print("Logits:", logits[0, :3])
+    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
+    timm_logits = timm_model(pixel_values)
+    assert timm_logits.shape == outputs.logits.shape
+    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
+    print("Looks ok!")
+
+    if pytorch_dump_folder_path is not None:
+        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
+        model.save_pretrained(pytorch_dump_folder_path)
+        processor.save_pretrained(pytorch_dump_folder_path)
+
+    if push_to_hub:
+        print(f"Pushing model {model_name} and processor to the hub")
+        model.push_to_hub(f"ybelkada/{model_name}")
+        processor.push_to_hub(f"ybelkada/{model_name}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--model_name",
+        default="resnetv2_50x1_bitm",
+        type=str,
+        help="Name of the BiT timm model you'd like to convert.",
+    )
+    parser.add_argument(
+        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+    )
+    parser.add_argument(
+        "--push_to_hub",
+        action="store_true",
+        help="Whether to push the model to the hub.",
+    )
+
+    args = parser.parse_args()
+    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
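(and example the cLI
The script above is normally driven through its argparse entry point, e.g. `python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-50-hf`. A hedged sketch of the equivalent programmatic call, assuming timm, torch and Pillow are installed and the script is importable from the current directory; the output path is illustrative:

# Sketch: call the conversion function directly instead of via the CLI.
from convert_bit_to_pytorch import convert_bit_checkpoint

convert_bit_checkpoint(
    model_name="resnetv2_50x1_bitm",         # default timm checkpoint from the argparser
    pytorch_dump_folder_path="./bit-50-hf",  # hypothetical local output directory
    push_to_hub=False,
)
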
venv/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py ADDED
@@ -0,0 +1,345 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for BiT."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+    convert_to_rgb,
+    get_resize_output_image_size,
+    resize,
+    to_channel_dimension_format,
+)
+from ...image_utils import (
+    OPENAI_CLIP_MEAN,
+    OPENAI_CLIP_STD,
+    ChannelDimension,
+    ImageInput,
+    PILImageResampling,
+    infer_channel_dimension_format,
+    is_scaled_image,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+    validate_kwargs,
+    validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_vision_available():
+    import PIL
+
+
+class BitImageProcessor(BaseImageProcessor):
+    r"""
+    Constructs a BiT image processor.
+
+    Args:
+        do_resize (`bool`, *optional*, defaults to `True`):
+            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+            `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
+            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
+            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
+            method.
+        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+        do_center_crop (`bool`, *optional*, defaults to `True`):
+            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+            `preprocess` method.
+        crop_size (`Dict[str, int]` *optional*, defaults to 224):
+            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+            method.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+            the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+            method.
+        do_normalize:
+            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+            Can be overridden by the `image_std` parameter in the `preprocess` method.
+        do_convert_rgb (`bool`, *optional*, defaults to `True`):
+            Whether to convert the image to RGB.
+    """
+
+    model_input_names = ["pixel_values"]
+
+    def __init__(
+        self,
+        do_resize: bool = True,
+        size: Dict[str, int] = None,
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        do_center_crop: bool = True,
+        crop_size: Dict[str, int] = None,
+        do_rescale: bool = True,
+        rescale_factor: Union[int, float] = 1 / 255,
+        do_normalize: bool = True,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = True,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        size = size if size is not None else {"shortest_edge": 224}
+        size = get_size_dict(size, default_to_square=False)
+        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+
+        self.do_resize = do_resize
+        self.size = size
+        self.resample = resample
+        self.do_center_crop = do_center_crop
+        self.crop_size = crop_size
+        self.do_rescale = do_rescale
+        self.rescale_factor = rescale_factor
+        self.do_normalize = do_normalize
+        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+        self.do_convert_rgb = do_convert_rgb
+        self._valid_processor_keys = [
+            "images",
+            "do_resize",
+            "size",
+            "resample",
+            "do_center_crop",
+            "crop_size",
+            "do_rescale",
+            "rescale_factor",
+            "do_normalize",
+            "image_mean",
+            "image_std",
+            "do_convert_rgb",
+            "return_tensors",
+            "data_format",
+            "input_data_format",
+        ]
+
+    # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
+    def resize(
+        self,
+        image: np.ndarray,
+        size: Dict[str, int],
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        data_format: Optional[Union[str, ChannelDimension]] = None,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> np.ndarray:
+        """
+        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+        resized to keep the input aspect ratio.
+
+        Args:
+            image (`np.ndarray`):
+                Image to resize.
+            size (`Dict[str, int]`):
+                Size of the output image.
+            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                Resampling filter to use when resizing the image.
+            data_format (`str` or `ChannelDimension`, *optional*):
+                The channel dimension format of the image. If not provided, it will be the same as the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format of the input image. If not provided, it will be inferred.
+        """
+        default_to_square = True
+        if "shortest_edge" in size:
+            size = size["shortest_edge"]
+            default_to_square = False
+        elif "height" in size and "width" in size:
+            size = (size["height"], size["width"])
+        else:
+            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
+
+        output_size = get_resize_output_image_size(
+            image,
+            size=size,
+            default_to_square=default_to_square,
+            input_data_format=input_data_format,
+        )
+        return resize(
+            image,
+            size=output_size,
+            resample=resample,
+            data_format=data_format,
+            input_data_format=input_data_format,
+            **kwargs,
+        )
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        do_resize: bool = None,
+        size: Dict[str, int] = None,
+        resample: PILImageResampling = None,
+        do_center_crop: bool = None,
+        crop_size: int = None,
+        do_rescale: bool = None,
+        rescale_factor: float = None,
+        do_normalize: bool = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> PIL.Image.Image:
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                Whether to resize the image.
+            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
+                the longest edge resized to keep the input aspect ratio.
+            resample (`int`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+                has an effect if `do_resize` is set to `True`.
+            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                Whether to center crop the image.
+            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image.
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
+                `True`.
+            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                Whether to convert the image to RGB.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - Unset: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        size = size if size is not None else self.size
+        size = get_size_dict(size, param_name="size", default_to_square=False)
+        resample = resample if resample is not None else self.resample
+        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+        crop_size = crop_size if crop_size is not None else self.crop_size
+        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        images = make_list_of_images(images)
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+
+        validate_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_center_crop=do_center_crop,
+            crop_size=crop_size,
+            do_resize=do_resize,
+            size=size,
+            resample=resample,
+        )
+
+        # PIL RGBA images are converted to RGB
+        if do_convert_rgb:
+            images = [convert_to_rgb(image) for image in images]
+
+        # All transformations expect numpy arrays.
+        images = [to_numpy_array(image) for image in images]
+
+        if is_scaled_image(images[0]) and do_rescale:
+            logger.warning_once(
+                "It looks like you are trying to rescale already rescaled images. If the input"
+                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+            )
+
+        if input_data_format is None:
+            # We assume that all images have the same channel dimension format.
+            input_data_format = infer_channel_dimension_format(images[0])
+
+        if do_resize:
+            images = [
+                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+                for image in images
+            ]

+        if do_center_crop:
+            images = [
+                self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+            ]
+
+        if do_rescale:
+            images = [
+                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        if do_normalize:
+            images = [
+                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        images = [
+            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+        ]
+
+        data = {"pixel_values": images}
+        return BatchFeature(data=data, tensor_type=return_tensors)
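
The `preprocess()` method above applies resize → center crop → rescale → normalize and packs the result into a `BatchFeature`. A short sketch of using it with the defaults (assumes Pillow, numpy and torch are installed; the image is a dummy input):

# Sketch: defaults are shortest_edge=224, 224x224 crop, CLIP mean/std normalization.
import numpy as np
from PIL import Image
from transformers import BitImageProcessor

processor = BitImageProcessor()
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
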
venv/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py ADDED
@@ -0,0 +1,898 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BiT model. Also supports backbone for ViT hybrid."""
16
+
17
+ import collections
18
+ import math
19
+ from typing import Optional, Tuple
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import Tensor, nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BackboneOutput,
30
+ BaseModelOutputWithNoAttention,
31
+ BaseModelOutputWithPoolingAndNoAttention,
32
+ ImageClassifierOutputWithNoAttention,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from ...utils.backbone_utils import BackboneMixin
43
+ from .configuration_bit import BitConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ # General docstring
49
+ _CONFIG_FOR_DOC = "BitConfig"
50
+
51
+ # Base docstring
52
+ _CHECKPOINT_FOR_DOC = "google/bit-50"
53
+ _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
54
+
55
+ # Image classification docstring
56
+ _IMAGE_CLASS_CHECKPOINT = "google/bit-50"
57
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
58
+
59
+
60
+ from ..deprecated._archive_maps import BIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
61
+
62
+
63
+ def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]:
64
+ r"""
65
+ Utility function to get the tuple padding value given the kernel_size and padding.
66
+
67
+ Args:
68
+ padding (Union[`str`, `int`], *optional*):
69
+ Padding value, can be either `"same"`, `"valid"`. If a different value is provided the default padding from
70
+ PyTorch is used.
71
+ kernel_size (`int`, *optional*, defaults to 7):
72
+ Kernel size of the convolution layers.
73
+ stride (`int`, *optional*, defaults to 1):
74
+ Stride value of the convolution layers.
75
+ dilation (`int`, *optional*, defaults to 1):
76
+ Dilation value of the convolution layers.
77
+ """
78
+ dynamic = False
79
+ if padding is None:
80
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
81
+ return padding, dynamic
82
+
83
+ if isinstance(padding, str):
84
+ # for any string padding, the padding will be calculated for you, one of three ways
85
+ padding = padding.lower()
86
+ if padding == "same":
87
+ # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
88
+ if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0:
89
+ # static case, no extra overhead
90
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
91
+ else:
92
+ # dynamic 'SAME' padding, has runtime/GPU memory overhead
93
+ padding = 0
94
+ dynamic = True
95
+ elif padding == "valid":
96
+ # 'VALID' padding, same as padding=0
97
+ padding = 0
98
+ else:
99
+ # Default to PyTorch style 'same'-ish symmetric padding
100
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
101
+ return padding, dynamic
102
+
103
+
104
+ class WeightStandardizedConv2d(nn.Conv2d):
105
+ """Conv2d with Weight Standardization. Includes TensorFlow compatible SAME padding. Used for ViT Hybrid model.
106
+
107
+ Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
108
+ Standardization](https://arxiv.org/abs/1903.10520v2)
109
+ """
110
+
111
+ def __init__(
112
+ self,
113
+ in_channel,
114
+ out_channels,
115
+ kernel_size,
116
+ stride=1,
117
+ padding="SAME",
118
+ dilation=1,
119
+ groups=1,
120
+ bias=False,
121
+ eps=1e-6,
122
+ ):
123
+ padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
124
+ super().__init__(
125
+ in_channel,
126
+ out_channels,
127
+ kernel_size,
128
+ stride=stride,
129
+ padding=padding,
130
+ dilation=dilation,
131
+ groups=groups,
132
+ bias=bias,
133
+ )
134
+ if is_dynamic:
135
+ self.pad = DynamicPad2d(kernel_size, stride, dilation)
136
+ else:
137
+ self.pad = None
138
+ self.eps = eps
139
+
140
+ def forward(self, hidden_state):
141
+ if self.pad is not None:
142
+ hidden_state = self.pad(hidden_state)
143
+ weight = nn.functional.batch_norm(
144
+ self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps
145
+ ).reshape_as(self.weight)
146
+ hidden_state = nn.functional.conv2d(
147
+ hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups
148
+ )
149
+ return hidden_state
150
+
151
+
152
+ class BitGroupNormActivation(nn.GroupNorm):
153
+ r"""
154
+ A module that combines group normalization with an activation function.
155
+ """
156
+
157
+ def __init__(self, config, num_channels, eps=1e-5, affine=True, apply_activation=True):
158
+ super(BitGroupNormActivation, self).__init__(config.num_groups, num_channels, eps=eps, affine=affine)
159
+ if apply_activation:
160
+ self.activation = ACT2FN[config.hidden_act]
161
+ else:
162
+ self.activation = nn.Identity()
163
+
164
+ def forward(self, hidden_state):
165
+ hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
166
+ hidden_state = self.activation(hidden_state)
167
+ return hidden_state
168
+
169
+
170
+ class DynamicPad2d(nn.Module):
171
+ r"""
172
+ A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
173
+ hidden states.
174
+ """
175
+
176
+ def __init__(self, kernel_size, stride, dilation, value=0):
177
+ super().__init__()
178
+ # Safety checkers
179
+ if isinstance(kernel_size, int):
180
+ kernel_size = (kernel_size, kernel_size)
181
+
182
+ if isinstance(stride, int):
183
+ stride = (stride, stride)
184
+
185
+ if isinstance(dilation, int):
186
+ dilation = (dilation, dilation)
187
+
188
+ self.kernel_size = kernel_size
189
+ self.stride = stride
190
+ self.dilation = dilation
191
+ self.value = value
192
+
193
+ def compute_padding(x, kernel_size, stride, dilation):
194
+ return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
195
+
196
+ self.compute_padding = compute_padding
197
+
198
+ def __call__(self, input):
199
+ # Get width and height
200
+ input_height, input_width = input.size()[-2:]
201
+
202
+ # Compute the padding values
203
+ padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0])
204
+ padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1])
205
+
206
+ # apply pad
207
+ if padding_height > 0 or padding_width > 0:
208
+ input = nn.functional.pad(
209
+ input,
210
+ [
211
+ padding_width // 2,
212
+ padding_width - padding_width // 2,
213
+ padding_height // 2,
214
+ padding_height - padding_height // 2,
215
+ ],
216
+ value=self.value,
217
+ )
218
+ return input
219
+
220
+
221
+ class BitMaxPool2d(nn.MaxPool2d):
222
+ """Tensorflow like 'SAME' wrapper for 2D max pooling"""
223
+
224
+ def __init__(
225
+ self,
226
+ kernel_size: int,
227
+ stride=None,
228
+ dilation=1,
229
+ ceil_mode=False,
230
+ padding=(0, 0),
231
+ padding_value=0,
232
+ use_dynamic_padding=True,
233
+ ):
234
+ kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size)
235
+ stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
236
+ dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation)
237
+ super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
238
+ if use_dynamic_padding:
239
+ self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value)
240
+ else:
241
+ self.pad = nn.Identity()
242
+
243
+ def forward(self, hidden_states):
244
+ hidden_states = self.pad(hidden_states)
245
+ return nn.functional.max_pool2d(
246
+ hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode
247
+ )
248
+
249
+
250
+ class BitEmbeddings(nn.Module):
251
+ """
252
+ BiT Embeddings (stem) composed of a single aggressive convolution.
253
+ """
254
+
255
+ def __init__(self, config: BitConfig):
256
+ super().__init__()
257
+
258
+ self.convolution = WeightStandardizedConv2d(
259
+ config.num_channels,
260
+ config.embedding_size,
261
+ kernel_size=7,
262
+ stride=2,
263
+ eps=1e-8,
264
+ padding=config.global_padding,
265
+ )
266
+
267
+ self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding)
268
+
269
+ # Use the same padding strategy as convolutional layers
270
+ if config.global_padding is not None and config.global_padding.upper() == "SAME":
271
+ self.pad = nn.Identity()
272
+ else:
273
+ self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0)
274
+
275
+ if not config.layer_type == "preactivation":
276
+ self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size)
277
+ else:
278
+ self.norm = nn.Identity()
279
+
280
+ self.num_channels = config.num_channels
281
+
282
+ def forward(self, pixel_values: Tensor) -> Tensor:
283
+ num_channels = pixel_values.shape[1]
284
+ if num_channels != self.num_channels:
285
+ raise ValueError(
286
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
287
+ )
288
+
289
+ embedding = self.convolution(pixel_values)
290
+
291
+ embedding = self.pad(embedding)
292
+
293
+ embedding = self.norm(embedding)
294
+
295
+ embedding = self.pooler(embedding)
296
+
297
+ return embedding
298
+
299
+
300
+ # Copied from transformers.models.convnext.modeling_convnext.drop_path
301
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
302
+ """
303
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
304
+
305
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
306
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
307
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
308
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
309
+ argument.
310
+ """
311
+ if drop_prob == 0.0 or not training:
312
+ return input
313
+ keep_prob = 1 - drop_prob
314
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
315
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
316
+ random_tensor.floor_() # binarize
317
+ output = input.div(keep_prob) * random_tensor
318
+ return output
319
+
320
+
321
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Bit
322
+ class BitDropPath(nn.Module):
323
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
324
+
325
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
326
+ super().__init__()
327
+ self.drop_prob = drop_prob
328
+
329
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
330
+ return drop_path(hidden_states, self.drop_prob, self.training)
331
+
332
+ def extra_repr(self) -> str:
333
+ return "p={}".format(self.drop_prob)
334
+
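+ # --- Editor's note: illustrative sketch, not part of the library source. ---
+ # Stochastic depth zeroes the *entire* residual branch for a random subset of
+ # samples in the batch and rescales the survivors by 1 / keep_prob, so the
+ # branch keeps the same expected value at train and eval time.
+
+ if __name__ == "__main__":
+     import torch
+
+     torch.manual_seed(0)
+     x = torch.ones(8, 4)  # 8 samples, arbitrary trailing dims
+     out = drop_path(x, drop_prob=0.5, training=True)
+     # each row is either all zeros or all 2.0 (= 1 / keep_prob), so the
+     # batch mean stays around 1.0 in expectation
+     print(out[:, 0])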
335
+
336
+ def make_div(value, divisor=8):
337
+ min_value = divisor
338
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
339
+ if new_value < 0.9 * value:
340
+ new_value += divisor
341
+ return new_value
342
+
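+ # --- Editor's note: illustrative sketch, not part of the library source. ---
+ # `make_div` rounds a (possibly fractional) channel count to a multiple of
+ # `divisor` (default 8), refusing to round *down* by more than ~10%:
+ #
+ #   make_div(256 * 0.25)  # -> 64   (the bottleneck width of a 256-channel block)
+ #   make_div(100)         # -> 104  (rounded up to a multiple of 8)
+ #   make_div(10)          # -> 16   (8 would shrink it by >10%, so bump up)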
343
+
344
+ class BitPreActivationBottleneckLayer(nn.Module):
345
+ """Pre-activation (v2) bottleneck block.
346
+ Follows the implementation of "Identity Mappings in Deep Residual Networks":
347
+ https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
348
+
349
+ Except it puts the stride on 3x3 conv when available.
350
+ """
351
+
352
+ def __init__(
353
+ self,
354
+ config,
355
+ in_channels,
356
+ out_channels=None,
357
+ bottle_ratio=0.25,
358
+ stride=1,
359
+ dilation=1,
360
+ first_dilation=None,
361
+ groups=1,
362
+ drop_path_rate=0.0,
363
+ is_first_layer=False,
364
+ ):
365
+ super().__init__()
366
+
367
+ first_dilation = first_dilation or dilation
368
+
369
+ out_channels = out_channels or in_channels
370
+ mid_channels = make_div(out_channels * bottle_ratio)
371
+
372
+ if is_first_layer:
373
+ self.downsample = BitDownsampleConv(
374
+ config,
375
+ in_channels,
376
+ out_channels,
377
+ stride=stride,
378
+ preact=True,
379
+ )
380
+ else:
381
+ self.downsample = None
382
+
383
+ self.norm1 = BitGroupNormActivation(config, in_channels)
384
+ self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)
385
+
386
+ self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
387
+ self.conv2 = WeightStandardizedConv2d(
388
+ mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding
389
+ )
390
+
391
+ self.norm3 = BitGroupNormActivation(config, mid_channels)
392
+ self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)
393
+
394
+ self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
395
+
396
+ def forward(self, hidden_states):
397
+ hidden_states_preact = self.norm1(hidden_states)
398
+
399
+ # shortcut branch
400
+ shortcut = hidden_states
401
+ if self.downsample is not None:
402
+ shortcut = self.downsample(hidden_states_preact)
403
+
404
+ # residual branch
405
+ hidden_states = self.conv1(hidden_states_preact)
406
+ hidden_states = self.conv2(self.norm2(hidden_states))
407
+ hidden_states = self.conv3(self.norm3(hidden_states))
408
+ hidden_states = self.drop_path(hidden_states)
409
+ return hidden_states + shortcut
410
+
411
+
412
+ class BitBottleneckLayer(nn.Module):
413
+ """Non Pre-activation bottleneck block, equivalent to V1.5/V1b bottleneck. Used for ViT Hybrid."""
414
+
415
+ def __init__(
416
+ self,
417
+ config,
418
+ in_channels,
419
+ out_channels=None,
420
+ bottle_ratio=0.25,
421
+ stride=1,
422
+ dilation=1,
423
+ first_dilation=None,
424
+ groups=1,
425
+ drop_path_rate=0.0,
426
+ is_first_layer=False,
427
+ ):
428
+ super().__init__()
429
+ first_dilation = first_dilation or dilation
430
+
431
+ out_channels = out_channels or in_channels
432
+ mid_chs = make_div(out_channels * bottle_ratio)
433
+
434
+ if is_first_layer:
435
+ self.downsample = BitDownsampleConv(
436
+ config,
437
+ in_channels,
438
+ out_channels,
439
+ stride=stride,
440
+ preact=False,
441
+ )
442
+ else:
443
+ self.downsample = None
444
+
445
+ self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-8, padding=config.global_padding)
446
+ self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs)
447
+ self.conv2 = WeightStandardizedConv2d(
448
+ mid_chs,
449
+ mid_chs,
450
+ 3,
451
+ stride=stride,
452
+ dilation=first_dilation,
453
+ groups=groups,
454
+ eps=1e-8,
455
+ padding=config.global_padding,
456
+ )
457
+ self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs)
458
+ self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-8, padding=config.global_padding)
459
+ self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
460
+ self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
461
+
462
+ self.activation = ACT2FN[config.hidden_act]
463
+
464
+ def forward(self, hidden_states):
465
+ # shortcut branch
466
+ shortcut = hidden_states
467
+ if self.downsample is not None:
468
+ shortcut = self.downsample(hidden_states)
469
+
470
+ # residual
471
+ hidden_states = self.conv1(hidden_states)
472
+ hidden_states = self.norm1(hidden_states)
473
+
474
+ hidden_states = self.conv2(hidden_states)
475
+ hidden_states = self.norm2(hidden_states)
476
+
477
+ hidden_states = self.conv3(hidden_states)
478
+ hidden_states = self.norm3(hidden_states)
479
+
480
+ hidden_states = self.drop_path(hidden_states)
481
+ hidden_states = self.activation(hidden_states + shortcut)
482
+ return hidden_states
483
+
484
+
485
+ class BitDownsampleConv(nn.Module):
486
+ def __init__(
487
+ self,
488
+ config,
489
+ in_channels,
490
+ out_channels,
491
+ stride=1,
492
+ preact=True,
493
+ ):
494
+ super().__init__()
495
+ self.conv = WeightStandardizedConv2d(
496
+ in_channels, out_channels, 1, stride=stride, eps=1e-8, padding=config.global_padding
497
+ )
498
+ self.norm = (
499
+ nn.Identity()
500
+ if preact
501
+ else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
502
+ )
503
+
504
+ def forward(self, x):
505
+ return self.norm(self.conv(x))
506
+
507
+
508
+ class BitStage(nn.Module):
509
+ """
510
+ A ResNet v2 stage composed by stacked layers.
511
+ """
512
+
513
+ def __init__(
514
+ self,
515
+ config,
516
+ in_channels,
517
+ out_channels,
518
+ stride,
519
+ dilation,
520
+ depth,
521
+ bottle_ratio=0.25,
522
+ layer_dropout=None,
523
+ ):
524
+ super().__init__()
525
+
526
+ first_dilation = 1 if dilation in (1, 2) else 2
527
+
528
+ # Get the layer type
529
+ if config.layer_type == "bottleneck":
530
+ layer_cls = BitBottleneckLayer
531
+ else:
532
+ layer_cls = BitPreActivationBottleneckLayer
533
+
534
+ prev_chs = in_channels
535
+ self.layers = nn.Sequential()
536
+ for layer_idx in range(depth):
537
+ # Get the current hyper-parameters
538
+ stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(
539
+ layer_idx, stride, layer_dropout
540
+ )
541
+
542
+ self.layers.add_module(
543
+ str(layer_idx),
544
+ layer_cls(
545
+ config,
546
+ prev_chs,
547
+ out_channels,
548
+ stride=stride,
549
+ dilation=dilation,
550
+ bottle_ratio=bottle_ratio,
551
+ first_dilation=first_dilation,
552
+ drop_path_rate=drop_path_rate,
553
+ is_first_layer=is_first_layer,
554
+ ),
555
+ )
556
+ prev_chs = out_channels
557
+ first_dilation = dilation
558
+
559
+ def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
560
+ r"""
561
+ Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
562
+ """
563
+ if layer_dropout:
564
+ drop_path_rate = layer_dropout[layer_idx]
565
+ else:
566
+ drop_path_rate = 0.0
567
+
568
+ if layer_idx != 0:
569
+ stride = 1
570
+
571
+ is_first_layer = layer_idx == 0
572
+
573
+ return stride, drop_path_rate, is_first_layer
574
+
575
+ def forward(self, input: Tensor) -> Tensor:
576
+ hidden_state = input
577
+ for _, layer in enumerate(self.layers):
578
+ hidden_state = layer(hidden_state)
579
+ return hidden_state
580
+
581
+
582
+ class BitEncoder(nn.Module):
583
+ def __init__(self, config: BitConfig):
584
+ super().__init__()
585
+ self.stages = nn.ModuleList([])
586
+
587
+ prev_chs = config.embedding_size
588
+
589
+ # These need to stay hardcoded
590
+ current_stride = 4
591
+ dilation = 1
592
+
593
+ layer_dropouts = [
594
+ x.tolist()
595
+ for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)
596
+ ]
597
+
598
+ for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate(
599
+ zip(config.depths, config.hidden_sizes, layer_dropouts)
600
+ ):
601
+ # Get the updated hyper params
602
+ out_channels, stride, dilation = self._get_updated_hyperparameters(
603
+ stage_idx, current_stride, current_hidden_size, dilation, config
604
+ )
605
+
606
+ stage = BitStage(
607
+ config,
608
+ prev_chs,
609
+ out_channels,
610
+ stride=stride,
611
+ dilation=dilation,
612
+ depth=current_depth,
613
+ layer_dropout=layer_dropout,
614
+ )
615
+
616
+ prev_chs = out_channels
617
+ current_stride *= stride
618
+
619
+ self.stages.add_module(str(stage_idx), stage)
620
+
621
+ def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
622
+ out_channels = make_div(current_hidden_size * config.width_factor)
623
+ stride = 1 if stage_idx == 0 else 2
624
+ if current_stride >= config.output_stride:
625
+ dilation *= stride
626
+ stride = 1
627
+ return out_channels, stride, dilation
628
+
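+ # --- Editor's note: illustrative sketch, not part of the library source. ---
+ # Once the cumulative stride reaches `config.output_stride`, later stages trade
+ # their stride-2 downsampling for dilation so the feature map stops shrinking.
+ # Trace for 4 stages with output_stride=16, starting from the hardcoded
+ # current_stride=4:
+ #
+ #   current_stride, dilation = 4, 1
+ #   for stage_idx in range(4):
+ #       stride = 1 if stage_idx == 0 else 2
+ #       if current_stride >= 16:
+ #           dilation *= stride
+ #           stride = 1
+ #       current_stride *= stride
+ #   # stage strides end up 1, 2, 2, 1 and the last stage uses dilation=2,
+ #   # so the final feature map stays at 1/16 of the input resolution.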
629
+ def forward(
630
+ self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
631
+ ) -> BaseModelOutputWithNoAttention:
632
+ hidden_states = () if output_hidden_states else None
633
+
634
+ for stage_module in self.stages:
635
+ if output_hidden_states:
636
+ hidden_states = hidden_states + (hidden_state,)
637
+
638
+ hidden_state = stage_module(hidden_state)
639
+
640
+ if output_hidden_states:
641
+ hidden_states = hidden_states + (hidden_state,)
642
+
643
+ if not return_dict:
644
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
645
+
646
+ return BaseModelOutputWithNoAttention(
647
+ last_hidden_state=hidden_state,
648
+ hidden_states=hidden_states,
649
+ )
650
+
651
+
652
+ class BitPreTrainedModel(PreTrainedModel):
653
+ """
654
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
655
+ models.
656
+ """
657
+
658
+ config_class = BitConfig
659
+ base_model_prefix = "bit"
660
+ main_input_name = "pixel_values"
661
+
662
+ def _init_weights(self, module):
663
+ if isinstance(module, nn.Conv2d):
664
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
665
+ elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
666
+ nn.init.constant_(module.weight, 1)
667
+ nn.init.constant_(module.bias, 0)
668
+
669
+
670
+ BIT_START_DOCSTRING = r"""
671
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
672
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
673
+ behavior.
674
+
675
+ Parameters:
676
+ config ([`BitConfig`]): Model configuration class with all the parameters of the model.
677
+ Initializing with a config file does not load the weights associated with the model, only the
678
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
679
+ """
680
+
681
+ BIT_INPUTS_DOCSTRING = r"""
682
+ Args:
683
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
684
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`]
685
+ for details.
686
+
687
+ output_hidden_states (`bool`, *optional*):
688
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
689
+ more detail.
690
+ return_dict (`bool`, *optional*):
691
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
692
+ """
693
+
694
+
695
+ @add_start_docstrings(
696
+ "The bare BiT model outputting raw features without any specific head on top.",
697
+ BIT_START_DOCSTRING,
698
+ )
699
+ class BitModel(BitPreTrainedModel):
700
+ def __init__(self, config):
701
+ super().__init__(config)
702
+ self.config = config
703
+
704
+ self.embedder = BitEmbeddings(config)
705
+
706
+ self.encoder = BitEncoder(config)
707
+ self.norm = (
708
+ BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1])
709
+ if config.layer_type == "preactivation"
710
+ else nn.Identity()
711
+ )
712
+
713
+ self.pooler = nn.AdaptiveAvgPool2d((1, 1))
714
+ # Initialize weights and apply final processing
715
+ self.post_init()
716
+
717
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
718
+ @add_code_sample_docstrings(
719
+ checkpoint=_CHECKPOINT_FOR_DOC,
720
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
721
+ config_class=_CONFIG_FOR_DOC,
722
+ modality="vision",
723
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
724
+ )
725
+ def forward(
726
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
727
+ ) -> BaseModelOutputWithPoolingAndNoAttention:
728
+ output_hidden_states = (
729
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
730
+ )
731
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
732
+
733
+ embedding_output = self.embedder(pixel_values)
734
+
735
+ encoder_outputs = self.encoder(
736
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
737
+ )
738
+
739
+ last_hidden_state = encoder_outputs[0]
740
+
741
+ last_hidden_state = self.norm(last_hidden_state)
742
+
743
+ pooled_output = self.pooler(last_hidden_state)
744
+
745
+ if not return_dict:
746
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
747
+
748
+ return BaseModelOutputWithPoolingAndNoAttention(
749
+ last_hidden_state=last_hidden_state,
750
+ pooler_output=pooled_output,
751
+ hidden_states=encoder_outputs.hidden_states,
752
+ )
753
+
754
+
755
+ @add_start_docstrings(
756
+ """
757
+ BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
758
+ ImageNet.
759
+ """,
760
+ BIT_START_DOCSTRING,
761
+ )
762
+ class BitForImageClassification(BitPreTrainedModel):
763
+ def __init__(self, config):
764
+ super().__init__(config)
765
+ self.num_labels = config.num_labels
766
+ self.bit = BitModel(config)
767
+ # classification head
768
+ self.classifier = nn.Sequential(
769
+ nn.Flatten(),
770
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
771
+ )
772
+ # initialize weights and apply final processing
773
+ self.post_init()
774
+
775
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
776
+ @add_code_sample_docstrings(
777
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
778
+ output_type=ImageClassifierOutputWithNoAttention,
779
+ config_class=_CONFIG_FOR_DOC,
780
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
781
+ )
782
+ def forward(
783
+ self,
784
+ pixel_values: Optional[torch.FloatTensor] = None,
785
+ labels: Optional[torch.LongTensor] = None,
786
+ output_hidden_states: Optional[bool] = None,
787
+ return_dict: Optional[bool] = None,
788
+ ) -> ImageClassifierOutputWithNoAttention:
789
+ r"""
790
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
791
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
792
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
793
+ """
794
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
795
+
796
+ outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
797
+
798
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
799
+
800
+ logits = self.classifier(pooled_output)
801
+
802
+ loss = None
803
+
804
+ if labels is not None:
805
+ if self.config.problem_type is None:
806
+ if self.num_labels == 1:
807
+ self.config.problem_type = "regression"
808
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
809
+ self.config.problem_type = "single_label_classification"
810
+ else:
811
+ self.config.problem_type = "multi_label_classification"
812
+ if self.config.problem_type == "regression":
813
+ loss_fct = MSELoss()
814
+ if self.num_labels == 1:
815
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
816
+ else:
817
+ loss = loss_fct(logits, labels)
818
+ elif self.config.problem_type == "single_label_classification":
819
+ loss_fct = CrossEntropyLoss()
820
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
821
+ elif self.config.problem_type == "multi_label_classification":
822
+ loss_fct = BCEWithLogitsLoss()
823
+ loss = loss_fct(logits, labels)
824
+
825
+ if not return_dict:
826
+ output = (logits,) + outputs[2:]
827
+ return (loss,) + output if loss is not None else output
828
+
829
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
830
+
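+ # --- Editor's note: illustrative usage sketch, not part of the library source. ---
+ # With integer labels and num_labels > 1, the problem_type logic above selects
+ # the cross-entropy ("single_label_classification") branch. Plain inference,
+ # assuming the `google/bit-50` ImageNet checkpoint is available on the Hub:
+ #
+ #   from transformers import AutoImageProcessor, BitForImageClassification
+ #   from PIL import Image
+ #   import requests, torch
+ #
+ #   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ #   image = Image.open(requests.get(url, stream=True).raw)
+ #
+ #   processor = AutoImageProcessor.from_pretrained("google/bit-50")
+ #   model = BitForImageClassification.from_pretrained("google/bit-50")
+ #
+ #   with torch.no_grad():
+ #       logits = model(**processor(image, return_tensors="pt")).logits
+ #   print(model.config.id2label[logits.argmax(-1).item()])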
831
+
832
+ @add_start_docstrings(
833
+ """
834
+ BiT backbone, to be used with frameworks like DETR and MaskFormer.
835
+ """,
836
+ BIT_START_DOCSTRING,
837
+ )
838
+ class BitBackbone(BitPreTrainedModel, BackboneMixin):
839
+ def __init__(self, config):
840
+ super().__init__(config)
841
+ super()._init_backbone(config)
842
+
843
+ self.bit = BitModel(config)
844
+ self.num_features = [config.embedding_size] + config.hidden_sizes
845
+
846
+ # initialize weights and apply final processing
847
+ self.post_init()
848
+
849
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
850
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
851
+ def forward(
852
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
853
+ ) -> BackboneOutput:
854
+ """
855
+ Returns:
856
+
857
+ Examples:
858
+
859
+ ```python
860
+ >>> from transformers import AutoImageProcessor, AutoBackbone
861
+ >>> import torch
862
+ >>> from PIL import Image
863
+ >>> import requests
864
+
865
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
866
+ >>> image = Image.open(requests.get(url, stream=True).raw)
867
+
868
+ >>> processor = AutoImageProcessor.from_pretrained("google/bit-50")
869
+ >>> model = AutoBackbone.from_pretrained("google/bit-50")
870
+
871
+ >>> inputs = processor(image, return_tensors="pt")
872
+ >>> outputs = model(**inputs)
873
+ ```"""
874
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
875
+ output_hidden_states = (
876
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
877
+ )
878
+
879
+ outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True)
880
+
881
+ hidden_states = outputs.hidden_states
882
+
883
+ feature_maps = ()
884
+ for idx, stage in enumerate(self.stage_names):
885
+ if stage in self.out_features:
886
+ feature_maps += (hidden_states[idx],)
887
+
888
+ if not return_dict:
889
+ output = (feature_maps,)
890
+ if output_hidden_states:
891
+ output += (outputs.hidden_states,)
892
+ return output
893
+
894
+ return BackboneOutput(
895
+ feature_maps=feature_maps,
896
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
897
+ attentions=None,
898
+ )
venv/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py ADDED
@@ -0,0 +1,61 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_dinov2": ["DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Dinov2Config", "Dinov2OnnxConfig"]
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_dinov2"] = [
34
+ "DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "Dinov2ForImageClassification",
36
+ "Dinov2Model",
37
+ "Dinov2PreTrainedModel",
38
+ "Dinov2Backbone",
39
+ ]
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_dinov2 import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Dinov2Config, Dinov2OnnxConfig
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ from .modeling_dinov2 import (
51
+ DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST,
52
+ Dinov2Backbone,
53
+ Dinov2ForImageClassification,
54
+ Dinov2Model,
55
+ Dinov2PreTrainedModel,
56
+ )
57
+
58
+ else:
59
+ import sys
60
+
61
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (990 Bytes).
venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc ADDED
Binary file (7.65 kB).
venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc ADDED
Binary file (7.97 kB).
venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc ADDED
Binary file (27.5 kB).
venv/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py ADDED
@@ -0,0 +1,175 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DINOv2 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of a [`Dinov2Model`]. It is used to instantiate a
37
+ Dinov2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
38
+ with the defaults will yield a similar configuration to that of the Dinov2
39
+ [google/dinov2-base-patch16-224](https://huggingface.co/google/dinov2-base-patch16-224) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Args:
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ mlp_ratio (`int`, *optional*, defaults to 4):
52
+ Ratio of the hidden size of the MLPs relative to the `hidden_size`.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for the attention probabilities.
60
+ initializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
62
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
63
+ The epsilon used by the layer normalization layers.
64
+ image_size (`int`, *optional*, defaults to 224):
65
+ The size (resolution) of each image.
66
+ patch_size (`int`, *optional*, defaults to 16):
67
+ The size (resolution) of each patch.
68
+ num_channels (`int`, *optional*, defaults to 3):
69
+ The number of input channels.
70
+ qkv_bias (`bool`, *optional*, defaults to `True`):
71
+ Whether to add a bias to the queries, keys and values.
72
+ layerscale_value (`float`, *optional*, defaults to 1.0):
73
+ Initial value to use for layer scale.
74
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
75
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
76
+ use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
77
+ Whether to use the SwiGLU feedforward neural network.
78
+ out_features (`List[str]`, *optional*):
79
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
80
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
81
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
82
+ same order as defined in the `stage_names` attribute.
83
+ out_indices (`List[int]`, *optional*):
84
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
85
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
86
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
87
+ same order as defined in the `stage_names` attribute.
88
+ apply_layernorm (`bool`, *optional*, defaults to `True`):
89
+ Whether to apply layer normalization to the feature maps in case the model is used as backbone.
90
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
91
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
92
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
93
+ seq_len, hidden_size)`.
94
+
95
+ Example:
96
+
97
+ ```python
98
+ >>> from transformers import Dinov2Config, Dinov2Model
99
+
100
+ >>> # Initializing a Dinov2 dinov2-base-patch16-224 style configuration
101
+ >>> configuration = Dinov2Config()
102
+
103
+ >>> # Initializing a model (with random weights) from the dinov2-base-patch16-224 style configuration
104
+ >>> model = Dinov2Model(configuration)
105
+
106
+ >>> # Accessing the model configuration
107
+ >>> configuration = model.config
108
+ ```"""
109
+
110
+ model_type = "dinov2"
111
+
112
+ def __init__(
113
+ self,
114
+ hidden_size=768,
115
+ num_hidden_layers=12,
116
+ num_attention_heads=12,
117
+ mlp_ratio=4,
118
+ hidden_act="gelu",
119
+ hidden_dropout_prob=0.0,
120
+ attention_probs_dropout_prob=0.0,
121
+ initializer_range=0.02,
122
+ layer_norm_eps=1e-6,
123
+ image_size=224,
124
+ patch_size=16,
125
+ num_channels=3,
126
+ qkv_bias=True,
127
+ layerscale_value=1.0,
128
+ drop_path_rate=0.0,
129
+ use_swiglu_ffn=False,
130
+ out_features=None,
131
+ out_indices=None,
132
+ apply_layernorm=True,
133
+ reshape_hidden_states=True,
134
+ **kwargs,
135
+ ):
136
+ super().__init__(**kwargs)
137
+
138
+ self.hidden_size = hidden_size
139
+ self.num_hidden_layers = num_hidden_layers
140
+ self.num_attention_heads = num_attention_heads
141
+ self.mlp_ratio = mlp_ratio
142
+ self.hidden_act = hidden_act
143
+ self.hidden_dropout_prob = hidden_dropout_prob
144
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
145
+ self.initializer_range = initializer_range
146
+ self.layer_norm_eps = layer_norm_eps
147
+ self.image_size = image_size
148
+ self.patch_size = patch_size
149
+ self.num_channels = num_channels
150
+ self.qkv_bias = qkv_bias
151
+ self.layerscale_value = layerscale_value
152
+ self.drop_path_rate = drop_path_rate
153
+ self.use_swiglu_ffn = use_swiglu_ffn
154
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
155
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
156
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
157
+ )
158
+ self.apply_layernorm = apply_layernorm
159
+ self.reshape_hidden_states = reshape_hidden_states
160
+
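+ # --- Editor's note: illustrative sketch, not part of the library source. ---
+ # `stage_names` is ["stem", "stage1", ..., f"stage{num_hidden_layers}"], so a
+ # backbone user can tap individual transformer blocks by name or index, e.g.:
+ #
+ #   config = Dinov2Config(out_features=["stage3", "stage6", "stage9", "stage12"])
+ #   config.out_features   # ['stage3', 'stage6', 'stage9', 'stage12']
+ #   config.out_indices    # aligned to positions 3, 6, 9 and 12 in stage_names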
161
+
162
+ class Dinov2OnnxConfig(OnnxConfig):
163
+ torch_onnx_minimum_version = version.parse("1.11")
164
+
165
+ @property
166
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
167
+ return OrderedDict(
168
+ [
169
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
170
+ ]
171
+ )
172
+
173
+ @property
174
+ def atol_for_validation(self) -> float:
175
+ return 1e-4
venv/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py ADDED
@@ -0,0 +1,287 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DINOv2 checkpoints from the original repository.
16
+
17
+ URL: https://github.com/facebookresearch/dinov2/tree/main
18
+ """
19
+
20
+
21
+ import argparse
22
+ import json
23
+ from pathlib import Path
24
+
25
+ import requests
26
+ import torch
27
+ import torch.nn as nn
28
+ from huggingface_hub import hf_hub_download
29
+ from PIL import Image
30
+ from torchvision import transforms
31
+
32
+ from transformers import BitImageProcessor, Dinov2Config, Dinov2ForImageClassification, Dinov2Model
33
+ from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
34
+ from transformers.utils import logging
35
+
36
+
37
+ logging.set_verbosity_info()
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ def get_dinov2_config(model_name, image_classifier=False):
42
+ config = Dinov2Config(image_size=518, patch_size=14)
43
+
44
+ # size of the architecture
45
+ if "vits" in model_name:
46
+ config.hidden_size = 384
47
+ config.num_attention_heads = 6
48
+ elif "vitb" in model_name:
49
+ pass
50
+ elif "vitl" in model_name:
51
+ config.hidden_size = 1024
52
+ config.num_hidden_layers = 24
53
+ config.num_attention_heads = 16
54
+ elif "vitg" in model_name:
55
+ config.use_swiglu_ffn = True
56
+ config.hidden_size = 1536
57
+ config.num_hidden_layers = 40
58
+ config.num_attention_heads = 24
59
+ else:
60
+ raise ValueError("Model not supported")
61
+
62
+ if image_classifier:
63
+ repo_id = "huggingface/label-files"
64
+ filename = "imagenet-1k-id2label.json"
65
+ config.num_labels = 1000
66
+ config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
67
+ config.id2label = {int(k): v for k, v in config.id2label.items()}
68
+
69
+ return config
70
+
71
+
72
+ def create_rename_keys(config):
73
+ rename_keys = []
74
+ # fmt: off
75
+
76
+ # patch embedding layer
77
+ rename_keys.append(("cls_token", "embeddings.cls_token"))
78
+ rename_keys.append(("mask_token", "embeddings.mask_token"))
79
+ rename_keys.append(("pos_embed", "embeddings.position_embeddings"))
80
+ rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"))
81
+ rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias"))
82
+
83
+ for i in range(config.num_hidden_layers):
84
+ # layernorms
85
+ rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight"))
86
+ rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias"))
87
+ rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight"))
88
+ rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias"))
89
+ # MLP
90
+ if config.use_swiglu_ffn:
91
+ rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight"))
92
+ rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias"))
93
+ rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight"))
94
+ rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias"))
95
+ else:
96
+ rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight"))
97
+ rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias"))
98
+ rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight"))
99
+ rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias"))
100
+ # layerscale
101
+ rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1"))
102
+ rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1"))
103
+ # attention projection layer
104
+ rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight"))
105
+ rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias"))
106
+
107
+ # final layernorm
108
+ rename_keys.append(("norm.weight", "layernorm.weight"))
109
+ rename_keys.append(("norm.bias", "layernorm.bias"))
110
+
111
+ # fmt: on
112
+ return rename_keys
113
+
114
+
115
+ def rename_key(dct, old, new):
116
+ val = dct.pop(old)
117
+ dct[new] = val
118
+
119
+
120
+ # we split up the matrix of each encoder layer into queries, keys and values
121
+ def read_in_q_k_v(state_dict, config):
122
+ for i in range(config.num_hidden_layers):
123
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
124
+ in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
125
+ in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
126
+ # next, add query, keys and values (in that order) to the state dict
127
+ state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
128
+ state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
129
+ state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
130
+ config.hidden_size : config.hidden_size * 2, :
131
+ ]
132
+ state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
133
+ config.hidden_size : config.hidden_size * 2
134
+ ]
135
+ state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
136
+ state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
137
+
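+ # --- Editor's note: illustrative sketch, not part of the library source. ---
+ # The fused qkv projection has shape (3 * hidden_size, hidden_size); the slices
+ # above peel it into query / key / value row blocks. For the base model
+ # (hidden_size = 768) that is rows 0:768, 768:1536 and the last 768 rows:
+ #
+ #   import torch
+ #   hidden_size = 768
+ #   qkv = torch.randn(3 * hidden_size, hidden_size)
+ #   q = qkv[:hidden_size]
+ #   k = qkv[hidden_size : hidden_size * 2]
+ #   v = qkv[-hidden_size:]
+ #   assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)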
138
+
139
+ # We will verify our results on an image of cute cats
140
+ def prepare_img():
141
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
142
+ image = Image.open(requests.get(url, stream=True).raw)
143
+ return image
144
+
145
+
146
+ @torch.no_grad()
147
+ def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
148
+ """
149
+ Copy/paste/tweak model's weights to our DINOv2 structure.
150
+ """
151
+
152
+ # define default Dinov2 configuration
153
+ image_classifier = "1layer" in model_name
154
+ config = get_dinov2_config(model_name, image_classifier=image_classifier)
155
+
156
+ # load original model from torch hub
157
+ original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", ""))
158
+ original_model.eval()
159
+
160
+ # load state_dict of original model, remove and rename some keys
161
+ state_dict = original_model.state_dict()
162
+ rename_keys = create_rename_keys(config)
163
+ for src, dest in rename_keys:
164
+ rename_key(state_dict, src, dest)
165
+ read_in_q_k_v(state_dict, config)
166
+
167
+ for key, val in state_dict.copy().items():
168
+ val = state_dict.pop(key)
169
+ if "w12" in key:
170
+ key = key.replace("w12", "weights_in")
171
+ if "w3" in key:
172
+ key = key.replace("w3", "weights_out")
173
+ state_dict[key] = val
174
+
175
+ # load HuggingFace model
176
+ if image_classifier:
177
+ model = Dinov2ForImageClassification(config).eval()
178
+ model.dinov2.load_state_dict(state_dict)
179
+ model_name_to_classifier_dict_url = {
180
+ "dinov2_vits14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth",
181
+ "dinov2_vitb14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth",
182
+ "dinov2_vitl14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth",
183
+ "dinov2_vitg14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth",
184
+ }
185
+ url = model_name_to_classifier_dict_url[model_name]
186
+ classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
187
+ model.classifier.weight = nn.Parameter(classifier_state_dict["weight"])
188
+ model.classifier.bias = nn.Parameter(classifier_state_dict["bias"])
189
+ else:
190
+ model = Dinov2Model(config).eval()
191
+ model.load_state_dict(state_dict)
192
+
193
+ # load image
194
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
195
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
196
+
197
+ # preprocess image
198
+ transformations = transforms.Compose(
199
+ [
200
+ transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
201
+ transforms.CenterCrop(224),
202
+ transforms.ToTensor(),
203
+ transforms.Normalize(
204
+ mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values
205
+ std=IMAGENET_DEFAULT_STD, # across a large photo dataset.
206
+ ),
207
+ ]
208
+ )
209
+
210
+ original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension
211
+
212
+ processor = BitImageProcessor(
213
+ size={"shortest_edge": 256},
214
+ resample=PILImageResampling.BICUBIC,
215
+ image_mean=IMAGENET_DEFAULT_MEAN,
216
+ image_std=IMAGENET_DEFAULT_STD,
217
+ )
218
+ pixel_values = processor(image, return_tensors="pt").pixel_values
219
+
220
+ assert torch.allclose(original_pixel_values, pixel_values)
221
+
222
+ with torch.no_grad():
223
+ outputs = model(pixel_values, output_hidden_states=True)
224
+ original_outputs = original_model(pixel_values)
225
+
226
+ # assert values
227
+ if image_classifier:
228
+ print("Predicted class:")
229
+ class_idx = outputs.logits.argmax(-1).item()
230
+ print(model.config.id2label[class_idx])
231
+ else:
232
+ assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape
233
+ assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3)
234
+ print("Looks ok!")
235
+
236
+ if pytorch_dump_folder_path is not None:
237
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
238
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
239
+ model.save_pretrained(pytorch_dump_folder_path)
240
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
241
+ processor.save_pretrained(pytorch_dump_folder_path)
242
+
243
+ if push_to_hub:
244
+ model_name_to_hf_name = {
245
+ "dinov2_vits14": "dinov2-small",
246
+ "dinov2_vitb14": "dinov2-base",
247
+ "dinov2_vitl14": "dinov2-large",
248
+ "dinov2_vitg14": "dinov2-giant",
249
+ "dinov2_vits14_1layer": "dinov2-small-imagenet1k-1-layer",
250
+ "dinov2_vitb14_1layer": "dinov2-base-imagenet1k-1-layer",
251
+ "dinov2_vitl14_1layer": "dinov2-large-imagenet1k-1-layer",
252
+ "dinov2_vitg14_1layer": "dinov2-giant-imagenet1k-1-layer",
253
+ }
254
+
255
+ name = model_name_to_hf_name[model_name]
256
+ model.push_to_hub(f"facebook/{name}")
257
+ processor.push_to_hub(f"facebook/{name}")
258
+
259
+
260
+ if __name__ == "__main__":
261
+ parser = argparse.ArgumentParser()
262
+ # Required parameters
263
+ parser.add_argument(
264
+ "--model_name",
265
+ default="dinov2_vitb14",
266
+ type=str,
267
+ choices=[
268
+ "dinov2_vits14",
269
+ "dinov2_vitb14",
270
+ "dinov2_vitl14",
271
+ "dinov2_vitg14",
272
+ "dinov2_vits14_1layer",
273
+ "dinov2_vitb14_1layer",
274
+ "dinov2_vitl14_1layer",
275
+ "dinov2_vitg14_1layer",
276
+ ],
277
+ help="Name of the model you'd like to convert.",
278
+ )
279
+ parser.add_argument(
280
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
281
+ )
282
+ parser.add_argument(
283
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
284
+ )
285
+
286
+ args = parser.parse_args()
287
+ convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
venv/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py ADDED
@@ -0,0 +1,856 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DINOv2 model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from typing import Dict, List, Optional, Set, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BackboneOutput,
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPooling,
32
+ ImageClassifierOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
36
+ from ...utils import (
37
+ add_code_sample_docstrings,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from ...utils.backbone_utils import BackboneMixin
44
+ from .configuration_dinov2 import Dinov2Config
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ # General docstring
50
+ _CONFIG_FOR_DOC = "Dinov2Config"
51
+
52
+ # Base docstring
53
+ _CHECKPOINT_FOR_DOC = "facebook/dinov2-base"
54
+ _EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
55
+
56
+ # Image classification docstring
57
+ _IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-small-imagenet1k-1-layer"
58
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
59
+
60
+
61
+ from ..deprecated._archive_maps import DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
62
+
63
+
64
+ class Dinov2Embeddings(nn.Module):
65
+ """
66
+ Construct the CLS token, mask token, position and patch embeddings.
67
+ """
68
+
69
+ def __init__(self, config: Dinov2Config) -> None:
70
+ super().__init__()
71
+
72
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
73
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
74
+ self.patch_embeddings = Dinov2PatchEmbeddings(config)
75
+ num_patches = self.patch_embeddings.num_patches
76
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
77
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
78
+ self.config = config
79
+
80
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
81
+ """
82
+ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
83
+ resolution images.
84
+
85
+ Source:
86
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
87
+ """
88
+
89
+ num_patches = embeddings.shape[1] - 1
90
+ num_positions = self.position_embeddings.shape[1] - 1
91
+ if num_patches == num_positions and height == width:
92
+ return self.position_embeddings
93
+ class_pos_embed = self.position_embeddings[:, 0]
94
+ patch_pos_embed = self.position_embeddings[:, 1:]
95
+ dim = embeddings.shape[-1]
96
+ height = height // self.config.patch_size
97
+ width = width // self.config.patch_size
98
+ # we add a small number to avoid floating point error in the interpolation
99
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
100
+ height, width = height + 0.1, width + 0.1
101
+ patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
102
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
103
+ target_dtype = patch_pos_embed.dtype
104
+ patch_pos_embed = nn.functional.interpolate(
105
+ patch_pos_embed.to(dtype=torch.float32),
106
+ scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))),
107
+ mode="bicubic",
108
+ align_corners=False,
109
+ ).to(dtype=target_dtype)
110
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
111
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
112
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
113
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
114
+
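+ # --- Editor's note: illustrative sketch, not part of the library source. ---
+ # The method above resamples the square grid of patch position embeddings so a
+ # model pretrained at one resolution can ingest larger images. A standalone
+ # version of the core operation, assuming a 37x37 pretraining grid (518 / 14)
+ # upscaled to 64x64:
+ #
+ #   import torch
+ #   dim, old_grid, new_grid = 768, 37, 64
+ #   patch_pos = torch.randn(1, old_grid * old_grid, dim)
+ #   grid = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
+ #   grid = torch.nn.functional.interpolate(grid, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
+ #   patch_pos = grid.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)
+ #   patch_pos.shape   # torch.Size([1, 4096, 768])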
115
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
116
+ batch_size, _, height, width = pixel_values.shape
117
+ target_dtype = self.patch_embeddings.projection.weight.dtype
118
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
119
+
120
+ if bool_masked_pos is not None:
121
+ embeddings = torch.where(
122
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
123
+ )
124
+
125
+ # add the [CLS] token to the embedded patch tokens
126
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
127
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
128
+
129
+ # add positional encoding to each token
130
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
131
+
132
+ embeddings = self.dropout(embeddings)
133
+
134
+ return embeddings
135
+
136
+
137
+ class Dinov2PatchEmbeddings(nn.Module):
138
+ """
139
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
140
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
141
+ Transformer.
142
+ """
143
+
144
+ def __init__(self, config):
145
+ super().__init__()
146
+ image_size, patch_size = config.image_size, config.patch_size
147
+ num_channels, hidden_size = config.num_channels, config.hidden_size
148
+
149
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
150
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
151
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
152
+ self.image_size = image_size
153
+ self.patch_size = patch_size
154
+ self.num_channels = num_channels
155
+ self.num_patches = num_patches
156
+
157
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
158
+
159
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
160
+ num_channels = pixel_values.shape[1]
161
+ if num_channels != self.num_channels:
162
+ raise ValueError(
163
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
164
+ f" Expected {self.num_channels} but got {num_channels}."
165
+ )
166
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
167
+ return embeddings
168
+
169
+
170
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2
171
+ class Dinov2SelfAttention(nn.Module):
172
+ def __init__(self, config: Dinov2Config) -> None:
173
+ super().__init__()
174
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
175
+ raise ValueError(
176
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
177
+ f"heads {config.num_attention_heads}."
178
+ )
179
+
180
+ self.num_attention_heads = config.num_attention_heads
181
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
182
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
183
+
184
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
185
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
186
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
187
+
188
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
189
+
190
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
191
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
192
+ x = x.view(new_x_shape)
193
+ return x.permute(0, 2, 1, 3)
194
+
195
+ def forward(
196
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
197
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
198
+ mixed_query_layer = self.query(hidden_states)
199
+
200
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
201
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
202
+ query_layer = self.transpose_for_scores(mixed_query_layer)
203
+
204
+ # Take the dot product between "query" and "key" to get the raw attention scores.
205
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
206
+
207
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
208
+
209
+ # Normalize the attention scores to probabilities.
210
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
211
+
212
+ # This is actually dropping out entire tokens to attend to, which might
213
+ # seem a bit unusual, but is taken from the original Transformer paper.
214
+ attention_probs = self.dropout(attention_probs)
215
+
216
+ # Mask heads if we want to
217
+ if head_mask is not None:
218
+ attention_probs = attention_probs * head_mask
219
+
220
+ context_layer = torch.matmul(attention_probs, value_layer)
221
+
222
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
223
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
224
+ context_layer = context_layer.view(new_context_layer_shape)
225
+
226
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
227
+
228
+ return outputs
229
+
230
+
231
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2
232
+ class Dinov2SelfOutput(nn.Module):
233
+ """
234
+ The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
235
+ layernorm applied before each block.
236
+ """
237
+
238
+ def __init__(self, config: Dinov2Config) -> None:
239
+ super().__init__()
240
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
241
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
242
+
243
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
244
+ hidden_states = self.dense(hidden_states)
245
+ hidden_states = self.dropout(hidden_states)
246
+
247
+ return hidden_states
248
+
249
+
250
+ # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2
251
+ class Dinov2Attention(nn.Module):
252
+ def __init__(self, config: Dinov2Config) -> None:
253
+ super().__init__()
254
+ self.attention = Dinov2SelfAttention(config)
255
+ self.output = Dinov2SelfOutput(config)
256
+ self.pruned_heads = set()
257
+
258
+ def prune_heads(self, heads: Set[int]) -> None:
259
+ if len(heads) == 0:
260
+ return
261
+ heads, index = find_pruneable_heads_and_indices(
262
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
263
+ )
264
+
265
+ # Prune linear layers
266
+ self.attention.query = prune_linear_layer(self.attention.query, index)
267
+ self.attention.key = prune_linear_layer(self.attention.key, index)
268
+ self.attention.value = prune_linear_layer(self.attention.value, index)
269
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
270
+
271
+ # Update hyper params and store pruned heads
272
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
273
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
274
+ self.pruned_heads = self.pruned_heads.union(heads)
275
+
276
+ def forward(
277
+ self,
278
+ hidden_states: torch.Tensor,
279
+ head_mask: Optional[torch.Tensor] = None,
280
+ output_attentions: bool = False,
281
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
282
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
283
+
284
+ attention_output = self.output(self_outputs[0], hidden_states)
285
+
286
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
287
+ return outputs
288
+
289
+
290
+ class Dinov2LayerScale(nn.Module):
291
+ def __init__(self, config) -> None:
292
+ super().__init__()
293
+ self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
294
+
295
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
296
+ return hidden_state * self.lambda1
297
+
298
+
299
+ # Copied from transformers.models.beit.modeling_beit.drop_path
300
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
301
+ """
302
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
303
+
304
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
305
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
306
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
307
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
308
+ argument.
309
+ """
310
+ if drop_prob == 0.0 or not training:
311
+ return input
312
+ keep_prob = 1 - drop_prob
313
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
314
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
315
+ random_tensor.floor_() # binarize
316
+ output = input.div(keep_prob) * random_tensor
317
+ return output
318
+
319
+
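As a sanity check on the stochastic-depth function above, a minimal sketch (using the same arithmetic as `drop_path`, with illustrative values) showing that surviving samples are rescaled by `1 / keep_prob` so the expected output matches the input:

```python
import torch

x = torch.ones(4, 3)                                  # 4 samples in the batch
keep_prob = 0.5
shape = (x.shape[0],) + (1,) * (x.ndim - 1)           # one mask value per sample
random_tensor = (keep_prob + torch.rand(shape)).floor_()   # 0 or 1 per sample
out = x.div(keep_prob) * random_tensor
# dropped rows are all zeros, kept rows are scaled to 2.0, so E[out] == x
print(out)
```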
320
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath
321
+ class Dinov2DropPath(nn.Module):
322
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
323
+
324
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
325
+ super().__init__()
326
+ self.drop_prob = drop_prob
327
+
328
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
329
+ return drop_path(hidden_states, self.drop_prob, self.training)
330
+
331
+ def extra_repr(self) -> str:
332
+ return "p={}".format(self.drop_prob)
333
+
334
+
335
+ class Dinov2MLP(nn.Module):
336
+ def __init__(self, config) -> None:
337
+ super().__init__()
338
+ in_features = out_features = config.hidden_size
339
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
340
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
341
+ if isinstance(config.hidden_act, str):
342
+ self.activation = ACT2FN[config.hidden_act]
343
+ else:
344
+ self.activation = config.hidden_act
345
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
346
+
347
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
348
+ hidden_state = self.fc1(hidden_state)
349
+ hidden_state = self.activation(hidden_state)
350
+ hidden_state = self.fc2(hidden_state)
351
+ return hidden_state
352
+
353
+
354
+ class Dinov2SwiGLUFFN(nn.Module):
355
+ def __init__(self, config) -> None:
356
+ super().__init__()
357
+ in_features = out_features = config.hidden_size
358
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
359
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
360
+
361
+ self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
362
+ self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
363
+
364
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
365
+ hidden_state = self.weights_in(hidden_state)
366
+ x1, x2 = hidden_state.chunk(2, dim=-1)
367
+ hidden = nn.functional.silu(x1) * x2
368
+ return self.weights_out(hidden)
369
+
370
+
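The SwiGLU hidden size above is shrunk to two thirds of the usual MLP width and then rounded up to a multiple of 8; a small worked example of that arithmetic (hypothetical config values):

```python
hidden_size, mlp_ratio = 768, 4                            # illustrative values
hidden_features = int(hidden_size * mlp_ratio)             # 3072
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
print(hidden_features)                                     # 2048, a multiple of 8
```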
371
+ class Dinov2Layer(nn.Module):
372
+ """This corresponds to the Block class in the original implementation."""
373
+
374
+ def __init__(self, config: Dinov2Config) -> None:
375
+ super().__init__()
376
+
377
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
378
+ self.attention = Dinov2Attention(config)
379
+ self.layer_scale1 = Dinov2LayerScale(config)
380
+ self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
381
+
382
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
383
+
384
+ if config.use_swiglu_ffn:
385
+ self.mlp = Dinov2SwiGLUFFN(config)
386
+ else:
387
+ self.mlp = Dinov2MLP(config)
388
+ self.layer_scale2 = Dinov2LayerScale(config)
389
+
390
+ def forward(
391
+ self,
392
+ hidden_states: torch.Tensor,
393
+ head_mask: Optional[torch.Tensor] = None,
394
+ output_attentions: bool = False,
395
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
396
+ self_attention_outputs = self.attention(
397
+ self.norm1(hidden_states), # in Dinov2, layernorm is applied before self-attention
398
+ head_mask,
399
+ output_attentions=output_attentions,
400
+ )
401
+ attention_output = self_attention_outputs[0]
402
+
403
+ attention_output = self.layer_scale1(attention_output)
404
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
405
+
406
+ # first residual connection
407
+ hidden_states = self.drop_path(attention_output) + hidden_states
408
+
409
+ # in Dinov2, layernorm is also applied after self-attention
410
+ layer_output = self.norm2(hidden_states)
411
+ layer_output = self.mlp(layer_output)
412
+ layer_output = self.layer_scale2(layer_output)
413
+
414
+ # second residual connection
415
+ layer_output = self.drop_path(layer_output) + hidden_states
416
+
417
+ outputs = (layer_output,) + outputs
418
+
419
+ return outputs
420
+
421
+
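For readers skimming `Dinov2Layer.forward`, a compact toy restatement of the two pre-norm residual branches (identity sub-modules, drop path omitted; purely illustrative, not the class above):

```python
import torch

def toy_layer(hidden_states, attn=lambda x: x, mlp=lambda x: x,
              norm1=lambda x: x, norm2=lambda x: x, scale1=1.0, scale2=1.0):
    # branch 1: pre-norm attention, layer scale, residual add
    hidden_states = scale1 * attn(norm1(hidden_states)) + hidden_states
    # branch 2: pre-norm MLP, layer scale, residual add
    return scale2 * mlp(norm2(hidden_states)) + hidden_states

print(toy_layer(torch.ones(1, 2, 4)).shape)  # torch.Size([1, 2, 4])
```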
422
+ # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2
423
+ class Dinov2Encoder(nn.Module):
424
+ def __init__(self, config: Dinov2Config) -> None:
425
+ super().__init__()
426
+ self.config = config
427
+ self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
428
+ self.gradient_checkpointing = False
429
+
430
+ def forward(
431
+ self,
432
+ hidden_states: torch.Tensor,
433
+ head_mask: Optional[torch.Tensor] = None,
434
+ output_attentions: bool = False,
435
+ output_hidden_states: bool = False,
436
+ return_dict: bool = True,
437
+ ) -> Union[tuple, BaseModelOutput]:
438
+ all_hidden_states = () if output_hidden_states else None
439
+ all_self_attentions = () if output_attentions else None
440
+
441
+ for i, layer_module in enumerate(self.layer):
442
+ if output_hidden_states:
443
+ all_hidden_states = all_hidden_states + (hidden_states,)
444
+
445
+ layer_head_mask = head_mask[i] if head_mask is not None else None
446
+
447
+ if self.gradient_checkpointing and self.training:
448
+ layer_outputs = self._gradient_checkpointing_func(
449
+ layer_module.__call__,
450
+ hidden_states,
451
+ layer_head_mask,
452
+ output_attentions,
453
+ )
454
+ else:
455
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
456
+
457
+ hidden_states = layer_outputs[0]
458
+
459
+ if output_attentions:
460
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
461
+
462
+ if output_hidden_states:
463
+ all_hidden_states = all_hidden_states + (hidden_states,)
464
+
465
+ if not return_dict:
466
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
467
+ return BaseModelOutput(
468
+ last_hidden_state=hidden_states,
469
+ hidden_states=all_hidden_states,
470
+ attentions=all_self_attentions,
471
+ )
472
+
473
+
474
+ class Dinov2PreTrainedModel(PreTrainedModel):
475
+ """
476
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
477
+ models.
478
+ """
479
+
480
+ config_class = Dinov2Config
481
+ base_model_prefix = "dinov2"
482
+ main_input_name = "pixel_values"
483
+ supports_gradient_checkpointing = True
484
+
485
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
486
+ """Initialize the weights"""
487
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
488
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
489
+ # `trunc_normal_cpu` not implemented in `half` issues
490
+ module.weight.data = nn.init.trunc_normal_(
491
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
492
+ ).to(module.weight.dtype)
493
+ if module.bias is not None:
494
+ module.bias.data.zero_()
495
+ elif isinstance(module, nn.LayerNorm):
496
+ module.bias.data.zero_()
497
+ module.weight.data.fill_(1.0)
498
+ elif isinstance(module, Dinov2Embeddings):
499
+ module.position_embeddings.data = nn.init.trunc_normal_(
500
+ module.position_embeddings.data.to(torch.float32),
501
+ mean=0.0,
502
+ std=self.config.initializer_range,
503
+ ).to(module.position_embeddings.dtype)
504
+
505
+ module.cls_token.data = nn.init.trunc_normal_(
506
+ module.cls_token.data.to(torch.float32),
507
+ mean=0.0,
508
+ std=self.config.initializer_range,
509
+ ).to(module.cls_token.dtype)
510
+
511
+
512
+ DINOV2_START_DOCSTRING = r"""
513
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
514
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
515
+ behavior.
516
+
517
+ Parameters:
518
+ config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.
519
+ Initializing with a config file does not load the weights associated with the model, only the
520
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
521
+ """
522
+
523
+ DINOV2_BASE_INPUTS_DOCSTRING = r"""
524
+ Args:
525
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
526
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
527
+ [`BitImageProcessor.preprocess`] for details.
528
+
529
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
530
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
531
+ pre-training.
532
+
533
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
534
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
535
+
536
+ - 1 indicates the head is **not masked**,
537
+ - 0 indicates the head is **masked**.
538
+
539
+ output_attentions (`bool`, *optional*):
540
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
541
+ tensors for more detail.
542
+ output_hidden_states (`bool`, *optional*):
543
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
544
+ more detail.
545
+ return_dict (`bool`, *optional*):
546
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
547
+ """
548
+
549
+ DINOV2_INPUTS_DOCSTRING = r"""
550
+ Args:
551
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
552
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
553
+ [`BitImageProcessor.preprocess`] for details.
554
+
555
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
556
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
557
+
558
+ - 1 indicates the head is **not masked**,
559
+ - 0 indicates the head is **masked**.
560
+
561
+ output_attentions (`bool`, *optional*):
562
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
563
+ tensors for more detail.
564
+ output_hidden_states (`bool`, *optional*):
565
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
566
+ more detail.
567
+ return_dict (`bool`, *optional*):
568
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
569
+ """
570
+
571
+
572
+ @add_start_docstrings(
573
+ "The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.",
574
+ DINOV2_START_DOCSTRING,
575
+ )
576
+ class Dinov2Model(Dinov2PreTrainedModel):
577
+ def __init__(self, config: Dinov2Config):
578
+ super().__init__(config)
579
+ self.config = config
580
+
581
+ self.embeddings = Dinov2Embeddings(config)
582
+ self.encoder = Dinov2Encoder(config)
583
+
584
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
585
+
586
+ # Initialize weights and apply final processing
587
+ self.post_init()
588
+
589
+ def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
590
+ return self.embeddings.patch_embeddings
591
+
592
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
593
+ """
594
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
595
+ class PreTrainedModel
596
+ """
597
+ for layer, heads in heads_to_prune.items():
598
+ self.encoder.layer[layer].attention.prune_heads(heads)
599
+
600
+ @add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING)
601
+ @add_code_sample_docstrings(
602
+ checkpoint=_CHECKPOINT_FOR_DOC,
603
+ output_type=BaseModelOutputWithPooling,
604
+ config_class=_CONFIG_FOR_DOC,
605
+ modality="vision",
606
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
607
+ )
608
+ def forward(
609
+ self,
610
+ pixel_values: Optional[torch.Tensor] = None,
611
+ bool_masked_pos: Optional[torch.Tensor] = None,
612
+ head_mask: Optional[torch.Tensor] = None,
613
+ output_attentions: Optional[bool] = None,
614
+ output_hidden_states: Optional[bool] = None,
615
+ return_dict: Optional[bool] = None,
616
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
617
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
618
+ output_hidden_states = (
619
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
620
+ )
621
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
622
+
623
+ if pixel_values is None:
624
+ raise ValueError("You have to specify pixel_values")
625
+
626
+ # Prepare head mask if needed
627
+ # 1.0 in head_mask indicate we keep the head
628
+ # attention_probs has shape bsz x n_heads x N x N
629
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
630
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
631
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
632
+
633
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
634
+
635
+ encoder_outputs = self.encoder(
636
+ embedding_output,
637
+ head_mask=head_mask,
638
+ output_attentions=output_attentions,
639
+ output_hidden_states=output_hidden_states,
640
+ return_dict=return_dict,
641
+ )
642
+ sequence_output = encoder_outputs[0]
643
+ sequence_output = self.layernorm(sequence_output)
644
+ pooled_output = sequence_output[:, 0, :]
645
+
646
+ if not return_dict:
647
+ head_outputs = (sequence_output, pooled_output)
648
+ return head_outputs + encoder_outputs[1:]
649
+
650
+ return BaseModelOutputWithPooling(
651
+ last_hidden_state=sequence_output,
652
+ pooler_output=pooled_output,
653
+ hidden_states=encoder_outputs.hidden_states,
654
+ attentions=encoder_outputs.attentions,
655
+ )
656
+
657
+
658
+ @add_start_docstrings(
659
+ """
660
+ Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state
661
+ of the [CLS] token) e.g. for ImageNet.
662
+ """,
663
+ DINOV2_START_DOCSTRING,
664
+ )
665
+ class Dinov2ForImageClassification(Dinov2PreTrainedModel):
666
+ def __init__(self, config: Dinov2Config) -> None:
667
+ super().__init__(config)
668
+
669
+ self.num_labels = config.num_labels
670
+ self.dinov2 = Dinov2Model(config)
671
+
672
+ # Classifier head
673
+ self.classifier = (
674
+ nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
675
+ )
676
+
677
+ # Initialize weights and apply final processing
678
+ self.post_init()
679
+
680
+ @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
681
+ @add_code_sample_docstrings(
682
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
683
+ output_type=ImageClassifierOutput,
684
+ config_class=_CONFIG_FOR_DOC,
685
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
686
+ )
687
+ def forward(
688
+ self,
689
+ pixel_values: Optional[torch.Tensor] = None,
690
+ head_mask: Optional[torch.Tensor] = None,
691
+ labels: Optional[torch.Tensor] = None,
692
+ output_attentions: Optional[bool] = None,
693
+ output_hidden_states: Optional[bool] = None,
694
+ return_dict: Optional[bool] = None,
695
+ ) -> Union[tuple, ImageClassifierOutput]:
696
+ r"""
697
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
698
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
699
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
700
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
701
+ """
702
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
703
+
704
+ outputs = self.dinov2(
705
+ pixel_values,
706
+ head_mask=head_mask,
707
+ output_attentions=output_attentions,
708
+ output_hidden_states=output_hidden_states,
709
+ return_dict=return_dict,
710
+ )
711
+
712
+ sequence_output = outputs[0] # batch_size, sequence_length, hidden_size
713
+
714
+ cls_token = sequence_output[:, 0]
715
+ patch_tokens = sequence_output[:, 1:]
716
+
717
+ linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
718
+
719
+ logits = self.classifier(linear_input)
720
+
721
+ loss = None
722
+ if labels is not None:
723
+ # move labels to correct device to enable model parallelism
724
+ labels = labels.to(logits.device)
725
+ if self.config.problem_type is None:
726
+ if self.num_labels == 1:
727
+ self.config.problem_type = "regression"
728
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
729
+ self.config.problem_type = "single_label_classification"
730
+ else:
731
+ self.config.problem_type = "multi_label_classification"
732
+
733
+ if self.config.problem_type == "regression":
734
+ loss_fct = MSELoss()
735
+ if self.num_labels == 1:
736
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
737
+ else:
738
+ loss = loss_fct(logits, labels)
739
+ elif self.config.problem_type == "single_label_classification":
740
+ loss_fct = CrossEntropyLoss()
741
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
742
+ elif self.config.problem_type == "multi_label_classification":
743
+ loss_fct = BCEWithLogitsLoss()
744
+ loss = loss_fct(logits, labels)
745
+
746
+ if not return_dict:
747
+ output = (logits,) + outputs[2:]
748
+ return ((loss,) + output) if loss is not None else output
749
+
750
+ return ImageClassifierOutput(
751
+ loss=loss,
752
+ logits=logits,
753
+ hidden_states=outputs.hidden_states,
754
+ attentions=outputs.attentions,
755
+ )
756
+
757
+
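The classification head above is sized `hidden_size * 2` because its input is the [CLS] token concatenated with the mean of the patch tokens; a short sketch with illustrative tensor sizes:

```python
import torch

# Illustrative tensor only: (batch, 1 + num_patches, hidden_size)
sequence_output = torch.randn(2, 257, 768)
cls_token = sequence_output[:, 0]            # (2, 768)
patch_tokens = sequence_output[:, 1:]        # (2, 256, 768)
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
print(linear_input.shape)                    # torch.Size([2, 1536]) == 2 * hidden_size
```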
758
+ @add_start_docstrings(
759
+ """
760
+ Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.
761
+ """,
762
+ DINOV2_START_DOCSTRING,
763
+ )
764
+ class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
765
+ def __init__(self, config):
766
+ super().__init__(config)
767
+ super()._init_backbone(config)
768
+
769
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
770
+ self.embeddings = Dinov2Embeddings(config)
771
+ self.encoder = Dinov2Encoder(config)
772
+
773
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
774
+
775
+ # Initialize weights and apply final processing
776
+ self.post_init()
777
+
778
+ def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
779
+ return self.embeddings.patch_embeddings
780
+
781
+ @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
782
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
783
+ def forward(
784
+ self,
785
+ pixel_values: torch.Tensor,
786
+ output_hidden_states: Optional[bool] = None,
787
+ output_attentions: Optional[bool] = None,
788
+ return_dict: Optional[bool] = None,
789
+ ) -> BackboneOutput:
790
+ """
791
+ Returns:
792
+
793
+ Examples:
794
+
795
+ ```python
796
+ >>> from transformers import AutoImageProcessor, AutoBackbone
797
+ >>> import torch
798
+ >>> from PIL import Image
799
+ >>> import requests
800
+
801
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
802
+ >>> image = Image.open(requests.get(url, stream=True).raw)
803
+
804
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
805
+ >>> model = AutoBackbone.from_pretrained(
806
+ ... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
807
+ ... )
808
+
809
+ >>> inputs = processor(image, return_tensors="pt")
810
+
811
+ >>> outputs = model(**inputs)
812
+ >>> feature_maps = outputs.feature_maps
813
+ >>> list(feature_maps[-1].shape)
814
+ [1, 768, 16, 16]
815
+ ```"""
816
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
817
+ output_hidden_states = (
818
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
819
+ )
820
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
821
+
822
+ embedding_output = self.embeddings(pixel_values)
823
+
824
+ outputs = self.encoder(
825
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
826
+ )
827
+
828
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
829
+
830
+ feature_maps = ()
831
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
832
+ if stage in self.out_features:
833
+ if self.config.apply_layernorm:
834
+ hidden_state = self.layernorm(hidden_state)
835
+ if self.config.reshape_hidden_states:
836
+ hidden_state = hidden_state[:, 1:]
837
+ # this was actually a bug in the original implementation that we copied here,
838
+ # because normally the order is height, width
839
+ batch_size, _, height, width = pixel_values.shape
840
+ patch_size = self.config.patch_size
841
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
842
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
843
+ feature_maps += (hidden_state,)
844
+
845
+ if not return_dict:
846
+ if output_hidden_states:
847
+ output = (feature_maps,) + outputs[1:]
848
+ else:
849
+ output = (feature_maps,) + outputs[2:]
850
+ return output
851
+
852
+ return BackboneOutput(
853
+ feature_maps=feature_maps,
854
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
855
+ attentions=outputs.attentions if output_attentions else None,
856
+ )
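The backbone's feature maps come from reshaping the sequence of patch tokens (minus the CLS token) back onto the image grid; a minimal standalone sketch of that reshape, assuming a hypothetical 224x224 input and patch size 14:

```python
import torch

# Hypothetical sizes: 224x224 image, patch_size 14, hidden_size 768
batch_size, height, width, patch_size, hidden = 1, 224, 224, 14, 768
num_patches = (height // patch_size) * (width // patch_size)   # 256
hidden_state = torch.randn(batch_size, 1 + num_patches, hidden)

hidden_state = hidden_state[:, 1:]           # drop the CLS token
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
print(hidden_state.shape)                    # torch.Size([1, 768, 16, 16])
```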
venv/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py ADDED
@@ -0,0 +1,166 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_distilbert": [
29
+ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
30
+ "DistilBertConfig",
31
+ "DistilBertOnnxConfig",
32
+ ],
33
+ "tokenization_distilbert": ["DistilBertTokenizer"],
34
+ }
35
+
36
+ try:
37
+ if not is_tokenizers_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ _import_structure["modeling_distilbert"] = [
51
+ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
52
+ "DistilBertForMaskedLM",
53
+ "DistilBertForMultipleChoice",
54
+ "DistilBertForQuestionAnswering",
55
+ "DistilBertForSequenceClassification",
56
+ "DistilBertForTokenClassification",
57
+ "DistilBertModel",
58
+ "DistilBertPreTrainedModel",
59
+ ]
60
+
61
+ try:
62
+ if not is_tf_available():
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ pass
66
+ else:
67
+ _import_structure["modeling_tf_distilbert"] = [
68
+ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
69
+ "TFDistilBertForMaskedLM",
70
+ "TFDistilBertForMultipleChoice",
71
+ "TFDistilBertForQuestionAnswering",
72
+ "TFDistilBertForSequenceClassification",
73
+ "TFDistilBertForTokenClassification",
74
+ "TFDistilBertMainLayer",
75
+ "TFDistilBertModel",
76
+ "TFDistilBertPreTrainedModel",
77
+ ]
78
+
79
+ try:
80
+ if not is_flax_available():
81
+ raise OptionalDependencyNotAvailable()
82
+ except OptionalDependencyNotAvailable:
83
+ pass
84
+ else:
85
+ _import_structure["modeling_flax_distilbert"] = [
86
+ "FlaxDistilBertForMaskedLM",
87
+ "FlaxDistilBertForMultipleChoice",
88
+ "FlaxDistilBertForQuestionAnswering",
89
+ "FlaxDistilBertForSequenceClassification",
90
+ "FlaxDistilBertForTokenClassification",
91
+ "FlaxDistilBertModel",
92
+ "FlaxDistilBertPreTrainedModel",
93
+ ]
94
+
95
+
96
+ if TYPE_CHECKING:
97
+ from .configuration_distilbert import (
98
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
99
+ DistilBertConfig,
100
+ DistilBertOnnxConfig,
101
+ )
102
+ from .tokenization_distilbert import DistilBertTokenizer
103
+
104
+ try:
105
+ if not is_tokenizers_available():
106
+ raise OptionalDependencyNotAvailable()
107
+ except OptionalDependencyNotAvailable:
108
+ pass
109
+ else:
110
+ from .tokenization_distilbert_fast import DistilBertTokenizerFast
111
+
112
+ try:
113
+ if not is_torch_available():
114
+ raise OptionalDependencyNotAvailable()
115
+ except OptionalDependencyNotAvailable:
116
+ pass
117
+ else:
118
+ from .modeling_distilbert import (
119
+ DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
120
+ DistilBertForMaskedLM,
121
+ DistilBertForMultipleChoice,
122
+ DistilBertForQuestionAnswering,
123
+ DistilBertForSequenceClassification,
124
+ DistilBertForTokenClassification,
125
+ DistilBertModel,
126
+ DistilBertPreTrainedModel,
127
+ )
128
+
129
+ try:
130
+ if not is_tf_available():
131
+ raise OptionalDependencyNotAvailable()
132
+ except OptionalDependencyNotAvailable:
133
+ pass
134
+ else:
135
+ from .modeling_tf_distilbert import (
136
+ TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
137
+ TFDistilBertForMaskedLM,
138
+ TFDistilBertForMultipleChoice,
139
+ TFDistilBertForQuestionAnswering,
140
+ TFDistilBertForSequenceClassification,
141
+ TFDistilBertForTokenClassification,
142
+ TFDistilBertMainLayer,
143
+ TFDistilBertModel,
144
+ TFDistilBertPreTrainedModel,
145
+ )
146
+
147
+ try:
148
+ if not is_flax_available():
149
+ raise OptionalDependencyNotAvailable()
150
+ except OptionalDependencyNotAvailable:
151
+ pass
152
+ else:
153
+ from .modeling_flax_distilbert import (
154
+ FlaxDistilBertForMaskedLM,
155
+ FlaxDistilBertForMultipleChoice,
156
+ FlaxDistilBertForQuestionAnswering,
157
+ FlaxDistilBertForSequenceClassification,
158
+ FlaxDistilBertForTokenClassification,
159
+ FlaxDistilBertModel,
160
+ FlaxDistilBertPreTrainedModel,
161
+ )
162
+
163
+ else:
164
+ import sys
165
+
166
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
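With the lazy-module pattern above, importing the package only builds `_import_structure`; the heavy torch/TF/Flax submodules are imported the first time their attributes are accessed. A hedged usage sketch (real `transformers` import paths, illustrative usage):

```python
# Importing the subpackage is cheap; the configuration module is only
# loaded when DistilBertConfig is first accessed through the lazy module.
from transformers.models import distilbert

config_cls = distilbert.DistilBertConfig   # triggers import of configuration_distilbert
print(config_cls.model_type)               # "distilbert"
```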
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc ADDED
Binary file (5.58 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc ADDED
Binary file (41.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc ADDED
Binary file (35.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc ADDED
Binary file (6.88 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py ADDED
@@ -0,0 +1,140 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DistilBERT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class DistilBertConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
33
+ is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
34
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
35
+ [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 30522):
42
+ Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
43
+ the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
44
+ max_position_embeddings (`int`, *optional*, defaults to 512):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
48
+ Whether to use sinusoidal positional embeddings.
49
+ n_layers (`int`, *optional*, defaults to 6):
50
+ Number of hidden layers in the Transformer encoder.
51
+ n_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ dim (`int`, *optional*, defaults to 768):
54
+ Dimensionality of the encoder layers and the pooler layer.
55
+ hidden_dim (`int`, *optional*, defaults to 3072):
56
+ The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
57
+ dropout (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_dropout (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
62
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
63
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ qa_dropout (`float`, *optional*, defaults to 0.1):
67
+ The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
68
+ seq_classif_dropout (`float`, *optional*, defaults to 0.2):
69
+ The dropout probabilities used in the sequence classification and the multiple choice model
70
+ [`DistilBertForSequenceClassification`].
71
+
72
+ Examples:
73
+
74
+ ```python
75
+ >>> from transformers import DistilBertConfig, DistilBertModel
76
+
77
+ >>> # Initializing a DistilBERT configuration
78
+ >>> configuration = DistilBertConfig()
79
+
80
+ >>> # Initializing a model (with random weights) from the configuration
81
+ >>> model = DistilBertModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "distilbert"
88
+ attribute_map = {
89
+ "hidden_size": "dim",
90
+ "num_attention_heads": "n_heads",
91
+ "num_hidden_layers": "n_layers",
92
+ }
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=30522,
97
+ max_position_embeddings=512,
98
+ sinusoidal_pos_embds=False,
99
+ n_layers=6,
100
+ n_heads=12,
101
+ dim=768,
102
+ hidden_dim=4 * 768,
103
+ dropout=0.1,
104
+ attention_dropout=0.1,
105
+ activation="gelu",
106
+ initializer_range=0.02,
107
+ qa_dropout=0.1,
108
+ seq_classif_dropout=0.2,
109
+ pad_token_id=0,
110
+ **kwargs,
111
+ ):
112
+ self.vocab_size = vocab_size
113
+ self.max_position_embeddings = max_position_embeddings
114
+ self.sinusoidal_pos_embds = sinusoidal_pos_embds
115
+ self.n_layers = n_layers
116
+ self.n_heads = n_heads
117
+ self.dim = dim
118
+ self.hidden_dim = hidden_dim
119
+ self.dropout = dropout
120
+ self.attention_dropout = attention_dropout
121
+ self.activation = activation
122
+ self.initializer_range = initializer_range
123
+ self.qa_dropout = qa_dropout
124
+ self.seq_classif_dropout = seq_classif_dropout
125
+ super().__init__(**kwargs, pad_token_id=pad_token_id)
126
+
127
+
128
+ class DistilBertOnnxConfig(OnnxConfig):
129
+ @property
130
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
131
+ if self.task == "multiple-choice":
132
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
133
+ else:
134
+ dynamic_axis = {0: "batch", 1: "sequence"}
135
+ return OrderedDict(
136
+ [
137
+ ("input_ids", dynamic_axis),
138
+ ("attention_mask", dynamic_axis),
139
+ ]
140
+ )
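To make the ONNX export mapping above concrete, a small sketch (assuming the classes defined in this file and the default task) of the dynamic axes that `DistilBertOnnxConfig.inputs` exposes:

```python
from transformers import DistilBertConfig
from transformers.models.distilbert import DistilBertOnnxConfig

onnx_config = DistilBertOnnxConfig(DistilBertConfig())   # task defaults to "default"
print(onnx_config.inputs)
# Expected (per the property above):
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
```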
venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py ADDED
@@ -0,0 +1,1384 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
18
+ part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
19
+ """
20
+
21
+
22
+ import math
23
+ from typing import Dict, List, Optional, Set, Tuple, Union
24
+
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import get_activation
32
+ from ...configuration_utils import PretrainedConfig
33
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
34
+ from ...modeling_outputs import (
35
+ BaseModelOutput,
36
+ MaskedLMOutput,
37
+ MultipleChoiceModelOutput,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutput,
40
+ TokenClassifierOutput,
41
+ )
42
+ from ...modeling_utils import PreTrainedModel
43
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
44
+ from ...utils import (
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ is_flash_attn_2_available,
49
+ is_flash_attn_greater_or_equal_2_10,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from .configuration_distilbert import DistilBertConfig
54
+
55
+
56
+ if is_flash_attn_2_available():
57
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
58
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
59
+
60
+
61
+ logger = logging.get_logger(__name__)
62
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
63
+ _CONFIG_FOR_DOC = "DistilBertConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
70
+
71
+
72
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
73
+ def _get_unpad_data(attention_mask):
74
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
75
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
76
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
77
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
78
+ return (
79
+ indices,
80
+ cu_seqlens,
81
+ max_seqlen_in_batch,
82
+ )
83
+
84
+
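A tiny worked example of the unpadding bookkeeping computed by `_get_unpad_data`, using a hypothetical 2-sequence mask where the first sequence has one padding token:

```python
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 0],
                               [1, 1, 1]], dtype=torch.int32)
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)             # tensor([2, 3])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([0, 1, 3, 4, 5])
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
print(cu_seqlens)   # tensor([0, 2, 5], dtype=torch.int32) -> cumulative sequence boundaries
```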
85
+ def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
86
+ if is_deepspeed_zero3_enabled():
87
+ import deepspeed
88
+
89
+ with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
90
+ if torch.distributed.get_rank() == 0:
91
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
92
+ else:
93
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
94
+
95
+
96
+ def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
97
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
98
+ out.requires_grad = False
99
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
100
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
101
+ out.detach_()
102
+
103
+
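The table built above follows the standard sinusoidal encoding, PE[pos, 2i] = sin(pos / 10000^(2i/dim)) and PE[pos, 2i+1] = cos(pos / 10000^(2i/dim)); a minimal standalone sketch with illustrative sizes:

```python
import numpy as np
import torch

n_pos, dim = 4, 6   # illustrative sizes, not the DistilBERT defaults
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out = torch.empty(n_pos, dim)
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))   # even slots: sine
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))   # odd slots: cosine
print(out[0])   # first row: sin(0)=0 in even slots, cos(0)=1 in odd slots
```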
104
+ class Embeddings(nn.Module):
105
+ def __init__(self, config: PretrainedConfig):
106
+ super().__init__()
107
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
108
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
109
+
110
+ self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
111
+ self.dropout = nn.Dropout(config.dropout)
112
+ self.register_buffer(
113
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
114
+ )
115
+
116
+ def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
117
+ """
118
+ Parameters:
119
+ input_ids (torch.Tensor):
120
+ torch.tensor(bs, max_seq_length) The token ids to embed.
121
+ input_embeds (*optional*, torch.Tensor):
122
+ The pre-computed word embeddings. Can only be passed if the input ids are `None`.
123
+
124
+
125
+ Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
126
+ embeddings)
127
+ """
128
+ if input_ids is not None:
129
+ input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
130
+
131
+ seq_length = input_embeds.size(1)
132
+
133
+ # Using the position ids registered as a buffer in the constructor helps
134
+ # when tracing the model without passing position ids, and avoids
135
+ # issues similar to issue #5664
136
+ if hasattr(self, "position_ids"):
137
+ position_ids = self.position_ids[:, :seq_length]
138
+ else:
139
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
140
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
141
+
142
+ position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
143
+
144
+ embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
145
+ embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
146
+ embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
147
+ return embeddings
148
+
149
+
150
+ class MultiHeadSelfAttention(nn.Module):
151
+ def __init__(self, config: PretrainedConfig):
152
+ super().__init__()
153
+ self.config = config
154
+
155
+ self.n_heads = config.n_heads
156
+ self.dim = config.dim
157
+ self.dropout = nn.Dropout(p=config.attention_dropout)
158
+ self.is_causal = False
159
+
160
+ # The number of attention heads must evenly divide the model dimension
161
+ if self.dim % self.n_heads != 0:
162
+ # Raise a ValueError if the dimension is not divisible by the number of heads
163
+ raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")
164
+
165
+ self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
166
+ self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
167
+ self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
168
+ self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
169
+
170
+ self.pruned_heads: Set[int] = set()
171
+ self.attention_head_size = self.dim // self.n_heads
172
+
173
+ def prune_heads(self, heads: List[int]):
174
+ if len(heads) == 0:
175
+ return
176
+ heads, index = find_pruneable_heads_and_indices(
177
+ heads, self.n_heads, self.attention_head_size, self.pruned_heads
178
+ )
179
+ # Prune linear layers
180
+ self.q_lin = prune_linear_layer(self.q_lin, index)
181
+ self.k_lin = prune_linear_layer(self.k_lin, index)
182
+ self.v_lin = prune_linear_layer(self.v_lin, index)
183
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
184
+ # Update hyper params
185
+ self.n_heads = self.n_heads - len(heads)
186
+ self.dim = self.attention_head_size * self.n_heads
187
+ self.pruned_heads = self.pruned_heads.union(heads)
188
+
189
+ def forward(
190
+ self,
191
+ query: torch.Tensor,
192
+ key: torch.Tensor,
193
+ value: torch.Tensor,
194
+ mask: torch.Tensor,
195
+ head_mask: Optional[torch.Tensor] = None,
196
+ output_attentions: bool = False,
197
+ ) -> Tuple[torch.Tensor, ...]:
198
+ """
199
+ Parameters:
200
+ query: torch.tensor(bs, seq_length, dim)
201
+ key: torch.tensor(bs, seq_length, dim)
202
+ value: torch.tensor(bs, seq_length, dim)
203
+ mask: torch.tensor(bs, seq_length)
204
+
205
+ Returns:
206
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights (only if `output_attentions=True`).
207
+ context: torch.tensor(bs, seq_length, dim) Contextualized layer.
208
+ """
209
+ bs, q_length, dim = query.size()
210
+ k_length = key.size(1)
211
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
212
+ # assert key.size() == value.size()
213
+
214
+ dim_per_head = self.dim // self.n_heads
215
+
216
+ mask_reshp = (bs, 1, 1, k_length)
217
+
218
+ def shape(x: torch.Tensor) -> torch.Tensor:
219
+ """separate heads"""
220
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
221
+
222
+ def unshape(x: torch.Tensor) -> torch.Tensor:
223
+ """group heads"""
224
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
225
+
226
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
227
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
228
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
229
+
230
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
231
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
232
+ mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
233
+ scores = scores.masked_fill(
234
+ mask, torch.tensor(torch.finfo(scores.dtype).min)
235
+ ) # (bs, n_heads, q_length, k_length)
236
+
237
+ weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length)
238
+ weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
239
+
240
+ # Mask heads if we want to
241
+ if head_mask is not None:
242
+ weights = weights * head_mask
243
+
244
+ context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
245
+ context = unshape(context) # (bs, q_length, dim)
246
+ context = self.out_lin(context) # (bs, q_length, dim)
247
+
248
+ if output_attentions:
249
+ return (context, weights)
250
+ else:
251
+ return (context,)
252
+
253
+
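To make the `shape()` / `unshape()` helpers above concrete, a short shape-tracing sketch (hypothetical sizes) showing that the round trip recovers the original tensor exactly:

```python
import torch

bs, seq_len, n_heads, dim_per_head = 2, 4, 3, 5   # illustrative sizes
dim = n_heads * dim_per_head

x = torch.randn(bs, seq_len, dim)
# separate heads: (bs, seq, dim) -> (bs, n_heads, seq, dim_per_head)
heads = x.view(bs, -1, n_heads, dim_per_head).transpose(1, 2)
# group heads back: (bs, n_heads, seq, dim_per_head) -> (bs, seq, dim)
back = heads.transpose(1, 2).contiguous().view(bs, -1, n_heads * dim_per_head)
print(heads.shape, back.shape)   # torch.Size([2, 3, 4, 5]) torch.Size([2, 4, 15])
assert torch.equal(x, back)
```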
254
+ class DistilBertFlashAttention2(MultiHeadSelfAttention):
255
+ """
256
+ DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention`, as the weights of the module
257
+ stay untouched. The only required change is in the forward pass, which needs to correctly call the public
258
+ API of flash attention and deal with padding tokens in case the input contains any.
259
+ """
260
+
261
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
262
+ def __init__(self, *args, **kwargs):
263
+ super().__init__(*args, **kwargs)
264
+
265
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
266
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
267
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
268
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
269
+
270
+ def forward(
271
+ self,
272
+ query: torch.Tensor,
273
+ key: torch.Tensor,
274
+ value: torch.Tensor,
275
+ mask: torch.Tensor,
276
+ head_mask: Optional[torch.Tensor] = None,
277
+ output_attentions: bool = False,
278
+ ) -> Tuple[torch.Tensor, ...]:
279
+ """
280
+ Parameters:
281
+ query: torch.tensor(bs, seq_length, dim)
282
+ key: torch.tensor(bs, seq_length, dim)
283
+ value: torch.tensor(bs, seq_length, dim)
284
+ mask: torch.tensor(bs, seq_length)
285
+
286
+ Returns:
287
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
288
+ seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
289
+ """
290
+ batch_size, q_length, dim = query.size()
291
+
292
+ dim_per_head = self.dim // self.n_heads
293
+
294
+ def reshape(x: torch.Tensor) -> torch.Tensor:
295
+ """separate heads"""
296
+ return x.view(batch_size, -1, self.n_heads, dim_per_head)
297
+
298
+ # Flash attention requires the input to have the shape
299
+ # batch_size x seq_length x n_heads x head_dim
300
+ query_states = reshape(self.q_lin(query))
301
+ key_states = reshape(self.k_lin(key))
302
+ value_states = reshape(self.v_lin(value))
303
+
304
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
305
+
306
+ # In PEFT, we usually cast the layer norms to float32 for training stability,
307
+ # so the input hidden states get silently cast to float32. Hence, we need to
308
+ # cast them back to the correct dtype just to be sure everything works as expected.
309
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
310
+ # to fp32. (LlamaRMSNorm handles it correctly)
311
+
312
+ if query_states.dtype == torch.float32:
313
+ if torch.is_autocast_enabled():
314
+ target_dtype = torch.get_autocast_gpu_dtype()
315
+ # Handle the case where the model is quantized
316
+ elif hasattr(self.config, "_pre_quantization_dtype"):
317
+ target_dtype = self.config._pre_quantization_dtype
318
+ else:
319
+ target_dtype = self.q_lin.weight.dtype
320
+
321
+ logger.warning_once(
322
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
323
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
324
+ f" {target_dtype}."
325
+ )
326
+
327
+ query_states = query_states.to(target_dtype)
328
+ key_states = key_states.to(target_dtype)
329
+ value_states = value_states.to(target_dtype)
330
+
331
+ attn_weights = self._flash_attention_forward(
332
+ query_states, key_states, value_states, mask, q_length, dropout=attn_dropout
333
+ )
334
+
335
+ attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
336
+ attn_output = self.out_lin(attn_weights_reshaped)
337
+
338
+ if output_attentions:
339
+ return (attn_output, attn_weights)
340
+ else:
341
+ return (attn_output,)
342
+
343
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False
344
+ def _flash_attention_forward(
345
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
346
+ ):
347
+ """
348
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
349
+ first unpad the input, then computes the attention scores and pad the final attention scores.
350
+
351
+ Args:
352
+ query_states (`torch.Tensor`):
353
+ Input query states to be passed to Flash Attention API
354
+ key_states (`torch.Tensor`):
355
+ Input key states to be passed to Flash Attention API
356
+ value_states (`torch.Tensor`):
357
+ Input value states to be passed to Flash Attention API
358
+ attention_mask (`torch.Tensor`):
359
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
360
+ position of padding tokens and 1 for the position of non-padding tokens.
361
+ dropout (`float`):
362
+ Attention dropout
363
+ softmax_scale (`float`, *optional*):
364
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
365
+ """
366
+ if not self._flash_attn_uses_top_left_mask:
367
+ causal = self.is_causal
368
+ else:
369
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
370
+ causal = self.is_causal and query_length != 1
371
+
372
+ # Contains at least one padding token in the sequence
373
+ if attention_mask is not None:
374
+ batch_size = query_states.shape[0]
375
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
376
+ query_states, key_states, value_states, attention_mask, query_length
377
+ )
378
+
379
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
380
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
381
+
382
+ attn_output_unpad = flash_attn_varlen_func(
383
+ query_states,
384
+ key_states,
385
+ value_states,
386
+ cu_seqlens_q=cu_seqlens_q,
387
+ cu_seqlens_k=cu_seqlens_k,
388
+ max_seqlen_q=max_seqlen_in_batch_q,
389
+ max_seqlen_k=max_seqlen_in_batch_k,
390
+ dropout_p=dropout,
391
+ softmax_scale=softmax_scale,
392
+ causal=causal,
393
+ )
394
+
395
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
396
+ else:
397
+ attn_output = flash_attn_func(
398
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
399
+ )
400
+
401
+ return attn_output
402
+
403
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads
404
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
405
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
406
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
407
+
408
+ key_layer = index_first_axis(
409
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
410
+ )
411
+ value_layer = index_first_axis(
412
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
413
+ )
414
+ if query_length == kv_seq_len:
415
+ query_layer = index_first_axis(
416
+ query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k
417
+ )
418
+ cu_seqlens_q = cu_seqlens_k
419
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
420
+ indices_q = indices_k
421
+ elif query_length == 1:
422
+ max_seqlen_in_batch_q = 1
423
+ cu_seqlens_q = torch.arange(
424
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
425
+ ) # There is a memcpy here, that is very bad.
426
+ indices_q = cu_seqlens_q[:-1]
427
+ query_layer = query_layer.squeeze(1)
428
+ else:
429
+ # The -q_len: slice assumes left padding.
430
+ attention_mask = attention_mask[:, -query_length:]
431
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
432
+
433
+ return (
434
+ query_layer,
435
+ key_layer,
436
+ value_layer,
437
+ indices_q,
438
+ (cu_seqlens_q, cu_seqlens_k),
439
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
440
+ )
441
+
442
+
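+ # Illustrative sketch (not part of the upstream file): the un-padding bookkeeping that
+ # `_upad_input` above relies on, computed on a toy attention mask. It mirrors what the
+ # `_get_unpad_data` helper returns; the mask values are assumptions for the example only.
+ def _example_unpad_bookkeeping():
+     import torch
+
+     attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # (bs=2, seq_len=4)
+     seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)  # tensor([3, 2])
+     # flat indices of the real (non-padding) tokens, as consumed by `index_first_axis`
+     indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+     max_seqlen_in_batch = int(seqlens_in_batch.max())  # 3
+     # cumulative sequence lengths in the layout expected by `flash_attn_varlen_func`
+     cu_seqlens = torch.nn.functional.pad(
+         torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)
+     )  # tensor([0, 3, 5])
+     return indices, cu_seqlens, max_seqlen_in_batch
+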
443
+ class FFN(nn.Module):
444
+ def __init__(self, config: PretrainedConfig):
445
+ super().__init__()
446
+ self.dropout = nn.Dropout(p=config.dropout)
447
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
448
+ self.seq_len_dim = 1
449
+ self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
450
+ self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
451
+ self.activation = get_activation(config.activation)
452
+
453
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
454
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
455
+
456
+ def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
457
+ x = self.lin1(input)
458
+ x = self.activation(x)
459
+ x = self.lin2(x)
460
+ x = self.dropout(x)
461
+ return x
462
+
463
+
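+ # Illustrative sketch (not part of the upstream file) of what `apply_chunking_to_forward`
+ # does for the FFN above when `chunk_size_feed_forward > 0`: the sequence axis is split
+ # into chunks that are pushed through `ff_chunk` one at a time and re-concatenated,
+ # trading a little speed for lower peak activation memory.
+ def _example_manual_chunking(ff_chunk, hidden_states, chunk_size):
+     import torch
+
+     if chunk_size == 0:  # 0 means "no chunking", the default
+         return ff_chunk(hidden_states)
+     chunks = hidden_states.split(chunk_size, dim=1)  # split along the seq_len axis
+     return torch.cat([ff_chunk(chunk) for chunk in chunks], dim=1)
+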
464
+ DISTILBERT_ATTENTION_CLASSES = {
465
+ "eager": MultiHeadSelfAttention,
466
+ "flash_attention_2": DistilBertFlashAttention2,
467
+ }
468
+
469
+
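+ # Sketch (not part of the upstream file): how the mapping above is selected in practice.
+ # Passing `attn_implementation="flash_attention_2"` to `from_pretrained` requires the
+ # `flash-attn` 2.x package and half-precision weights on a supported GPU; the checkpoint
+ # name is only an example.
+ def _example_select_attention_implementation():
+     import torch
+     from transformers import DistilBertModel
+
+     model = DistilBertModel.from_pretrained(
+         "distilbert-base-uncased",
+         attn_implementation="flash_attention_2",
+         torch_dtype=torch.float16,
+     )
+     return model.config._attn_implementation  # "flash_attention_2"
+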
470
+ class TransformerBlock(nn.Module):
471
+ def __init__(self, config: PretrainedConfig):
472
+ super().__init__()
473
+
474
+ # Configure multi-head attention: n_heads must divide dim evenly
475
+ if config.dim % config.n_heads != 0:
476
+ raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
477
+
478
+ self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
479
+ self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
480
+
481
+ self.ffn = FFN(config)
482
+ self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
483
+
484
+ def forward(
485
+ self,
486
+ x: torch.Tensor,
487
+ attn_mask: Optional[torch.Tensor] = None,
488
+ head_mask: Optional[torch.Tensor] = None,
489
+ output_attentions: bool = False,
490
+ ) -> Tuple[torch.Tensor, ...]:
491
+ """
492
+ Parameters:
493
+ x: torch.tensor(bs, seq_length, dim)
494
+ attn_mask: torch.tensor(bs, seq_length)
495
+
496
+ Returns:
497
+ sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights.
498
+ ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
499
+ """
500
+ # Self-Attention
501
+ sa_output = self.attention(
502
+ query=x,
503
+ key=x,
504
+ value=x,
505
+ mask=attn_mask,
506
+ head_mask=head_mask,
507
+ output_attentions=output_attentions,
508
+ )
509
+ if output_attentions:
510
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
511
+ else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
512
+ if type(sa_output) != tuple:
513
+ raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
514
+
515
+ sa_output = sa_output[0]
516
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
517
+
518
+ # Feed Forward Network
519
+ ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
520
+ ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
521
+
522
+ output = (ffn_output,)
523
+ if output_attentions:
524
+ output = (sa_weights,) + output
525
+ return output
526
+
527
+
528
+ class Transformer(nn.Module):
529
+ def __init__(self, config: PretrainedConfig):
530
+ super().__init__()
531
+ self.n_layers = config.n_layers
532
+ self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
533
+ self.gradient_checkpointing = False
534
+
535
+ def forward(
536
+ self,
537
+ x: torch.Tensor,
538
+ attn_mask: Optional[torch.Tensor] = None,
539
+ head_mask: Optional[torch.Tensor] = None,
540
+ output_attentions: bool = False,
541
+ output_hidden_states: bool = False,
542
+ return_dict: Optional[bool] = None,
543
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore
544
+ """
545
+ Parameters:
546
+ x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
547
+ attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
548
+
549
+ Returns:
550
+ hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer.
551
+ all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
552
+ Tuple of length n_layers with the hidden states from each layer.
553
+ Optional: only if output_hidden_states=True
554
+ all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
555
+ Tuple of length n_layers with the attention weights from each layer
556
+ Optional: only if output_attentions=True
557
+ """
558
+ all_hidden_states = () if output_hidden_states else None
559
+ all_attentions = () if output_attentions else None
560
+
561
+ hidden_state = x
562
+ for i, layer_module in enumerate(self.layer):
563
+ if output_hidden_states:
564
+ all_hidden_states = all_hidden_states + (hidden_state,)
565
+
566
+ if self.gradient_checkpointing and self.training:
567
+ layer_outputs = self._gradient_checkpointing_func(
568
+ layer_module.__call__,
569
+ hidden_state,
570
+ attn_mask,
571
+ head_mask[i],
572
+ output_attentions,
573
+ )
574
+ else:
575
+ layer_outputs = layer_module(
576
+ hidden_state,
577
+ attn_mask,
578
+ head_mask[i],
579
+ output_attentions,
580
+ )
581
+
582
+ hidden_state = layer_outputs[-1]
583
+
584
+ if output_attentions:
585
+ if len(layer_outputs) != 2:
586
+ raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
587
+
588
+ attentions = layer_outputs[0]
589
+ all_attentions = all_attentions + (attentions,)
590
+ else:
591
+ if len(layer_outputs) != 1:
592
+ raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")
593
+
594
+ # Add last layer
595
+ if output_hidden_states:
596
+ all_hidden_states = all_hidden_states + (hidden_state,)
597
+
598
+ if not return_dict:
599
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
600
+ return BaseModelOutput(
601
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
602
+ )
603
+
604
+
605
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
606
+ class DistilBertPreTrainedModel(PreTrainedModel):
607
+ """
608
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
609
+ models.
610
+ """
611
+
612
+ config_class = DistilBertConfig
613
+ load_tf_weights = None
614
+ base_model_prefix = "distilbert"
615
+ supports_gradient_checkpointing = True
616
+ _supports_flash_attn_2 = True
617
+
618
+ def _init_weights(self, module: nn.Module):
619
+ """Initialize the weights."""
620
+ if isinstance(module, nn.Linear):
621
+ # Slightly different from the TF version which uses truncated_normal for initialization
622
+ # cf https://github.com/pytorch/pytorch/pull/5617
623
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
624
+ if module.bias is not None:
625
+ module.bias.data.zero_()
626
+ elif isinstance(module, nn.Embedding):
627
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
628
+ if module.padding_idx is not None:
629
+ module.weight.data[module.padding_idx].zero_()
630
+ elif isinstance(module, nn.LayerNorm):
631
+ module.bias.data.zero_()
632
+ module.weight.data.fill_(1.0)
633
+ elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
634
+ create_sinusoidal_embeddings(
635
+ self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight
636
+ )
637
+
638
+
639
+ DISTILBERT_START_DOCSTRING = r"""
640
+
641
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
642
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
643
+ etc.)
644
+
645
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
646
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
647
+ and behavior.
648
+
649
+ Parameters:
650
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
651
+ Initializing with a config file does not load the weights associated with the model, only the
652
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
653
+ """
654
+
655
+ DISTILBERT_INPUTS_DOCSTRING = r"""
656
+ Args:
657
+ input_ids (`torch.LongTensor` of shape `({0})`):
658
+ Indices of input sequence tokens in the vocabulary.
659
+
660
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
661
+ [`PreTrainedTokenizer.__call__`] for details.
662
+
663
+ [What are input IDs?](../glossary#input-ids)
664
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
665
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
666
+
667
+ - 1 for tokens that are **not masked**,
668
+ - 0 for tokens that are **masked**.
669
+
670
+ [What are attention masks?](../glossary#attention-mask)
671
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
672
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
673
+
674
+ - 1 indicates the head is **not masked**,
675
+ - 0 indicates the head is **masked**.
676
+
677
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
678
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
679
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
680
+ model's internal embedding lookup matrix.
681
+ output_attentions (`bool`, *optional*):
682
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
683
+ tensors for more detail.
684
+ output_hidden_states (`bool`, *optional*):
685
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
686
+ more detail.
687
+ return_dict (`bool`, *optional*):
688
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
689
+ """
690
+
691
+
692
+ @add_start_docstrings(
693
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
694
+ DISTILBERT_START_DOCSTRING,
695
+ )
696
+ class DistilBertModel(DistilBertPreTrainedModel):
697
+ def __init__(self, config: PretrainedConfig):
698
+ super().__init__(config)
699
+
700
+ self.embeddings = Embeddings(config) # Embeddings
701
+ self.transformer = Transformer(config) # Encoder
702
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
703
+
704
+ # Initialize weights and apply final processing
705
+ self.post_init()
706
+
707
+ def get_position_embeddings(self) -> nn.Embedding:
708
+ """
709
+ Returns the position embeddings
710
+ """
711
+ return self.embeddings.position_embeddings
712
+
713
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
714
+ """
715
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
716
+
717
+ Arguments:
718
+ new_num_position_embeddings (`int`):
719
+ The number of new position embeddings. If position embeddings are learned, increasing the size
720
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
721
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
722
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
723
+ the size will remove vectors from the end.
724
+ """
725
+ num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
726
+
727
+ # no resizing needs to be done if the length stays the same
728
+ if num_position_embeds_diff == 0:
729
+ return
730
+
731
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
732
+ self.config.max_position_embeddings = new_num_position_embeddings
733
+
734
+ old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
735
+
736
+ self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
737
+
738
+ if self.config.sinusoidal_pos_embds:
739
+ create_sinusoidal_embeddings(
740
+ n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.embeddings.position_embeddings.weight
741
+ )
742
+ else:
743
+ with torch.no_grad():
744
+ if num_position_embeds_diff > 0:
745
+ self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
746
+ old_position_embeddings_weight
747
+ )
748
+ else:
749
+ self.embeddings.position_embeddings.weight = nn.Parameter(
750
+ old_position_embeddings_weight[:num_position_embeds_diff]
751
+ )
752
+ # move position_embeddings to correct device
753
+ self.embeddings.position_embeddings.to(self.device)
754
+
755
+ def get_input_embeddings(self) -> nn.Embedding:
756
+ return self.embeddings.word_embeddings
757
+
758
+ def set_input_embeddings(self, new_embeddings: nn.Embedding):
759
+ self.embeddings.word_embeddings = new_embeddings
760
+
761
+ def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
762
+ """
763
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
764
+ class PreTrainedModel
765
+ """
766
+ for layer, heads in heads_to_prune.items():
767
+ self.transformer.layer[layer].attention.prune_heads(heads)
768
+
769
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
770
+ @add_code_sample_docstrings(
771
+ checkpoint=_CHECKPOINT_FOR_DOC,
772
+ output_type=BaseModelOutput,
773
+ config_class=_CONFIG_FOR_DOC,
774
+ )
775
+ def forward(
776
+ self,
777
+ input_ids: Optional[torch.Tensor] = None,
778
+ attention_mask: Optional[torch.Tensor] = None,
779
+ head_mask: Optional[torch.Tensor] = None,
780
+ inputs_embeds: Optional[torch.Tensor] = None,
781
+ output_attentions: Optional[bool] = None,
782
+ output_hidden_states: Optional[bool] = None,
783
+ return_dict: Optional[bool] = None,
784
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
785
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
786
+ output_hidden_states = (
787
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
788
+ )
789
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
790
+
791
+ if input_ids is not None and inputs_embeds is not None:
792
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
793
+ elif input_ids is not None:
794
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
795
+ input_shape = input_ids.size()
796
+ elif inputs_embeds is not None:
797
+ input_shape = inputs_embeds.size()[:-1]
798
+ else:
799
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
800
+
801
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
802
+
803
+ # Prepare head mask if needed
804
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
805
+
806
+ embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim)
807
+
808
+ if self._use_flash_attention_2:
809
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
810
+ else:
811
+ if attention_mask is None:
812
+ attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
813
+
814
+ return self.transformer(
815
+ x=embeddings,
816
+ attn_mask=attention_mask,
817
+ head_mask=head_mask,
818
+ output_attentions=output_attentions,
819
+ output_hidden_states=output_hidden_states,
820
+ return_dict=return_dict,
821
+ )
822
+
823
+
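+ # Usage sketch (illustrative, not part of the upstream file): run the bare encoder and
+ # inspect the shape of the last hidden state. The checkpoint name is an assumption.
+ def _example_distilbert_model():
+     import torch
+     from transformers import AutoTokenizer, DistilBertModel
+
+     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+     model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     return outputs.last_hidden_state.shape  # (1, seq_length, dim), dim=768 for the base model
+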
824
+ @add_start_docstrings(
825
+ """DistilBert Model with a `masked language modeling` head on top.""",
826
+ DISTILBERT_START_DOCSTRING,
827
+ )
828
+ class DistilBertForMaskedLM(DistilBertPreTrainedModel):
829
+ _tied_weights_keys = ["vocab_projector.weight"]
830
+
831
+ def __init__(self, config: PretrainedConfig):
832
+ super().__init__(config)
833
+
834
+ self.activation = get_activation(config.activation)
835
+
836
+ self.distilbert = DistilBertModel(config)
837
+ self.vocab_transform = nn.Linear(config.dim, config.dim)
838
+ self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
839
+ self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
840
+
841
+ # Initialize weights and apply final processing
842
+ self.post_init()
843
+
844
+ self.mlm_loss_fct = nn.CrossEntropyLoss()
845
+
846
+ def get_position_embeddings(self) -> nn.Embedding:
847
+ """
848
+ Returns the position embeddings
849
+ """
850
+ return self.distilbert.get_position_embeddings()
851
+
852
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
853
+ """
854
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
855
+
856
+ Arguments:
857
+ new_num_position_embeddings (`int`):
858
+ The number of new position embeddings. If position embeddings are learned, increasing the size
859
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
860
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
861
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
862
+ the size will remove vectors from the end.
863
+ """
864
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
865
+
866
+ def get_output_embeddings(self) -> nn.Module:
867
+ return self.vocab_projector
868
+
869
+ def set_output_embeddings(self, new_embeddings: nn.Module):
870
+ self.vocab_projector = new_embeddings
871
+
872
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
873
+ @add_code_sample_docstrings(
874
+ checkpoint=_CHECKPOINT_FOR_DOC,
875
+ output_type=MaskedLMOutput,
876
+ config_class=_CONFIG_FOR_DOC,
877
+ )
878
+ def forward(
879
+ self,
880
+ input_ids: Optional[torch.Tensor] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ head_mask: Optional[torch.Tensor] = None,
883
+ inputs_embeds: Optional[torch.Tensor] = None,
884
+ labels: Optional[torch.LongTensor] = None,
885
+ output_attentions: Optional[bool] = None,
886
+ output_hidden_states: Optional[bool] = None,
887
+ return_dict: Optional[bool] = None,
888
+ ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
889
+ r"""
890
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
891
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
892
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
893
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
894
+ """
895
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
896
+
897
+ dlbrt_output = self.distilbert(
898
+ input_ids=input_ids,
899
+ attention_mask=attention_mask,
900
+ head_mask=head_mask,
901
+ inputs_embeds=inputs_embeds,
902
+ output_attentions=output_attentions,
903
+ output_hidden_states=output_hidden_states,
904
+ return_dict=return_dict,
905
+ )
906
+ hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
907
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
908
+ prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim)
909
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
910
+ prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
911
+
912
+ mlm_loss = None
913
+ if labels is not None:
914
+ mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
915
+
916
+ if not return_dict:
917
+ output = (prediction_logits,) + dlbrt_output[1:]
918
+ return ((mlm_loss,) + output) if mlm_loss is not None else output
919
+
920
+ return MaskedLMOutput(
921
+ loss=mlm_loss,
922
+ logits=prediction_logits,
923
+ hidden_states=dlbrt_output.hidden_states,
924
+ attentions=dlbrt_output.attentions,
925
+ )
926
+
927
+
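+ # Usage sketch (illustrative, not part of the upstream file): fill a [MASK] token with
+ # the most likely prediction. The checkpoint name is an assumption for the example.
+ def _example_masked_lm():
+     import torch
+     from transformers import AutoTokenizer, DistilBertForMaskedLM
+
+     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+     model = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
+     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
+     with torch.no_grad():
+         logits = model(**inputs).logits  # (1, seq_length, vocab_size)
+     mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
+     predicted_ids = logits[0, mask_positions].argmax(dim=-1)
+     return tokenizer.decode(predicted_ids)  # most likely filler token(s)
+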
928
+ @add_start_docstrings(
929
+ """
930
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
931
+ pooled output) e.g. for GLUE tasks.
932
+ """,
933
+ DISTILBERT_START_DOCSTRING,
934
+ )
935
+ class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
936
+ def __init__(self, config: PretrainedConfig):
937
+ super().__init__(config)
938
+ self.num_labels = config.num_labels
939
+ self.config = config
940
+
941
+ self.distilbert = DistilBertModel(config)
942
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
943
+ self.classifier = nn.Linear(config.dim, config.num_labels)
944
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
945
+
946
+ # Initialize weights and apply final processing
947
+ self.post_init()
948
+
949
+ def get_position_embeddings(self) -> nn.Embedding:
950
+ """
951
+ Returns the position embeddings
952
+ """
953
+ return self.distilbert.get_position_embeddings()
954
+
955
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
956
+ """
957
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
958
+
959
+ Arguments:
960
+ new_num_position_embeddings (`int`):
961
+ The number of new position embeddings. If position embeddings are learned, increasing the size
962
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
963
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
964
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
965
+ the size will remove vectors from the end.
966
+ """
967
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
968
+
969
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
970
+ @add_code_sample_docstrings(
971
+ checkpoint=_CHECKPOINT_FOR_DOC,
972
+ output_type=SequenceClassifierOutput,
973
+ config_class=_CONFIG_FOR_DOC,
974
+ )
975
+ def forward(
976
+ self,
977
+ input_ids: Optional[torch.Tensor] = None,
978
+ attention_mask: Optional[torch.Tensor] = None,
979
+ head_mask: Optional[torch.Tensor] = None,
980
+ inputs_embeds: Optional[torch.Tensor] = None,
981
+ labels: Optional[torch.LongTensor] = None,
982
+ output_attentions: Optional[bool] = None,
983
+ output_hidden_states: Optional[bool] = None,
984
+ return_dict: Optional[bool] = None,
985
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
986
+ r"""
987
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
988
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
989
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
990
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
991
+ """
992
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
993
+
994
+ distilbert_output = self.distilbert(
995
+ input_ids=input_ids,
996
+ attention_mask=attention_mask,
997
+ head_mask=head_mask,
998
+ inputs_embeds=inputs_embeds,
999
+ output_attentions=output_attentions,
1000
+ output_hidden_states=output_hidden_states,
1001
+ return_dict=return_dict,
1002
+ )
1003
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
1004
+ pooled_output = hidden_state[:, 0] # (bs, dim)
1005
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
1006
+ pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
1007
+ pooled_output = self.dropout(pooled_output) # (bs, dim)
1008
+ logits = self.classifier(pooled_output) # (bs, num_labels)
1009
+
1010
+ loss = None
1011
+ if labels is not None:
1012
+ if self.config.problem_type is None:
1013
+ if self.num_labels == 1:
1014
+ self.config.problem_type = "regression"
1015
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1016
+ self.config.problem_type = "single_label_classification"
1017
+ else:
1018
+ self.config.problem_type = "multi_label_classification"
1019
+
1020
+ if self.config.problem_type == "regression":
1021
+ loss_fct = MSELoss()
1022
+ if self.num_labels == 1:
1023
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1024
+ else:
1025
+ loss = loss_fct(logits, labels)
1026
+ elif self.config.problem_type == "single_label_classification":
1027
+ loss_fct = CrossEntropyLoss()
1028
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1029
+ elif self.config.problem_type == "multi_label_classification":
1030
+ loss_fct = BCEWithLogitsLoss()
1031
+ loss = loss_fct(logits, labels)
1032
+
1033
+ if not return_dict:
1034
+ output = (logits,) + distilbert_output[1:]
1035
+ return ((loss,) + output) if loss is not None else output
1036
+
1037
+ return SequenceClassifierOutput(
1038
+ loss=loss,
1039
+ logits=logits,
1040
+ hidden_states=distilbert_output.hidden_states,
1041
+ attentions=distilbert_output.attentions,
1042
+ )
1043
+
1044
+
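+ # Usage sketch (illustrative, not part of the upstream file): single-label sentiment
+ # classification with a fine-tuned checkpoint; the checkpoint name is an assumption.
+ def _example_sequence_classification():
+     import torch
+     from transformers import AutoTokenizer, DistilBertForSequenceClassification
+
+     name = "distilbert-base-uncased-finetuned-sst-2-english"
+     tokenizer = AutoTokenizer.from_pretrained(name)
+     model = DistilBertForSequenceClassification.from_pretrained(name)
+     inputs = tokenizer("I really enjoyed this movie!", return_tensors="pt")
+     with torch.no_grad():
+         logits = model(**inputs).logits  # (1, num_labels)
+     return model.config.id2label[int(logits.argmax(dim=-1))]  # e.g. "POSITIVE"
+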
1045
+ @add_start_docstrings(
1046
+ """
1047
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1048
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1049
+ """,
1050
+ DISTILBERT_START_DOCSTRING,
1051
+ )
1052
+ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
1053
+ def __init__(self, config: PretrainedConfig):
1054
+ super().__init__(config)
1055
+
1056
+ self.distilbert = DistilBertModel(config)
1057
+ self.qa_outputs = nn.Linear(config.dim, config.num_labels)
1058
+ if config.num_labels != 2:
1059
+ raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
1060
+
1061
+ self.dropout = nn.Dropout(config.qa_dropout)
1062
+
1063
+ # Initialize weights and apply final processing
1064
+ self.post_init()
1065
+
1066
+ def get_position_embeddings(self) -> nn.Embedding:
1067
+ """
1068
+ Returns the position embeddings
1069
+ """
1070
+ return self.distilbert.get_position_embeddings()
1071
+
1072
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1073
+ """
1074
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1075
+
1076
+ Arguments:
1077
+ new_num_position_embeddings (`int`):
1078
+ The number of new position embeddings. If position embeddings are learned, increasing the size
1079
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
1080
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
1081
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
1082
+ the size will remove vectors from the end.
1083
+ """
1084
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1085
+
1086
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
1087
+ @add_code_sample_docstrings(
1088
+ checkpoint=_CHECKPOINT_FOR_DOC,
1089
+ output_type=QuestionAnsweringModelOutput,
1090
+ config_class=_CONFIG_FOR_DOC,
1091
+ )
1092
+ def forward(
1093
+ self,
1094
+ input_ids: Optional[torch.Tensor] = None,
1095
+ attention_mask: Optional[torch.Tensor] = None,
1096
+ head_mask: Optional[torch.Tensor] = None,
1097
+ inputs_embeds: Optional[torch.Tensor] = None,
1098
+ start_positions: Optional[torch.Tensor] = None,
1099
+ end_positions: Optional[torch.Tensor] = None,
1100
+ output_attentions: Optional[bool] = None,
1101
+ output_hidden_states: Optional[bool] = None,
1102
+ return_dict: Optional[bool] = None,
1103
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
1104
+ r"""
1105
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1106
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1107
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1108
+ are not taken into account for computing the loss.
1109
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1110
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1111
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1112
+ are not taken into account for computing the loss.
1113
+ """
1114
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1115
+
1116
+ distilbert_output = self.distilbert(
1117
+ input_ids=input_ids,
1118
+ attention_mask=attention_mask,
1119
+ head_mask=head_mask,
1120
+ inputs_embeds=inputs_embeds,
1121
+ output_attentions=output_attentions,
1122
+ output_hidden_states=output_hidden_states,
1123
+ return_dict=return_dict,
1124
+ )
1125
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
1126
+
1127
+ hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
1128
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
1129
+ start_logits, end_logits = logits.split(1, dim=-1)
1130
+ start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
1131
+ end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
1132
+
1133
+ total_loss = None
1134
+ if start_positions is not None and end_positions is not None:
1135
+ # If we are on multi-GPU, splitting adds a dimension; squeeze it away
1136
+ if len(start_positions.size()) > 1:
1137
+ start_positions = start_positions.squeeze(-1)
1138
+ if len(end_positions.size()) > 1:
1139
+ end_positions = end_positions.squeeze(-1)
1140
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1141
+ ignored_index = start_logits.size(1)
1142
+ start_positions = start_positions.clamp(0, ignored_index)
1143
+ end_positions = end_positions.clamp(0, ignored_index)
1144
+
1145
+ loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
1146
+ start_loss = loss_fct(start_logits, start_positions)
1147
+ end_loss = loss_fct(end_logits, end_positions)
1148
+ total_loss = (start_loss + end_loss) / 2
1149
+
1150
+ if not return_dict:
1151
+ output = (start_logits, end_logits) + distilbert_output[1:]
1152
+ return ((total_loss,) + output) if total_loss is not None else output
1153
+
1154
+ return QuestionAnsweringModelOutput(
1155
+ loss=total_loss,
1156
+ start_logits=start_logits,
1157
+ end_logits=end_logits,
1158
+ hidden_states=distilbert_output.hidden_states,
1159
+ attentions=distilbert_output.attentions,
1160
+ )
1161
+
1162
+
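+ # Usage sketch (illustrative, not part of the upstream file): turn start/end logits into
+ # an answer span. The SQuAD-tuned checkpoint name and the toy context are assumptions.
+ def _example_question_answering():
+     import torch
+     from transformers import AutoTokenizer, DistilBertForQuestionAnswering
+
+     name = "distilbert-base-cased-distilled-squad"
+     tokenizer = AutoTokenizer.from_pretrained(name)
+     model = DistilBertForQuestionAnswering.from_pretrained(name)
+     question = "Where is the Eiffel Tower?"
+     context = "The Eiffel Tower is located in Paris, France."
+     inputs = tokenizer(question, context, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     start = int(outputs.start_logits.argmax())
+     end = int(outputs.end_logits.argmax())
+     return tokenizer.decode(inputs.input_ids[0, start : end + 1])  # e.g. "Paris, France"
+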
1163
+ @add_start_docstrings(
1164
+ """
1165
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1166
+ for Named-Entity-Recognition (NER) tasks.
1167
+ """,
1168
+ DISTILBERT_START_DOCSTRING,
1169
+ )
1170
+ class DistilBertForTokenClassification(DistilBertPreTrainedModel):
1171
+ def __init__(self, config: PretrainedConfig):
1172
+ super().__init__(config)
1173
+ self.num_labels = config.num_labels
1174
+
1175
+ self.distilbert = DistilBertModel(config)
1176
+ self.dropout = nn.Dropout(config.dropout)
1177
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1178
+
1179
+ # Initialize weights and apply final processing
1180
+ self.post_init()
1181
+
1182
+ def get_position_embeddings(self) -> nn.Embedding:
1183
+ """
1184
+ Returns the position embeddings
1185
+ """
1186
+ return self.distilbert.get_position_embeddings()
1187
+
1188
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1189
+ """
1190
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1191
+
1192
+ Arguments:
1193
+ new_num_position_embeddings (`int`):
1194
+ The number of new position embeddings. If position embeddings are learned, increasing the size
1195
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
1196
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
1197
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
1198
+ the size will remove vectors from the end.
1199
+ """
1200
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1201
+
1202
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1203
+ @add_code_sample_docstrings(
1204
+ checkpoint=_CHECKPOINT_FOR_DOC,
1205
+ output_type=TokenClassifierOutput,
1206
+ config_class=_CONFIG_FOR_DOC,
1207
+ )
1208
+ def forward(
1209
+ self,
1210
+ input_ids: Optional[torch.Tensor] = None,
1211
+ attention_mask: Optional[torch.Tensor] = None,
1212
+ head_mask: Optional[torch.Tensor] = None,
1213
+ inputs_embeds: Optional[torch.Tensor] = None,
1214
+ labels: Optional[torch.LongTensor] = None,
1215
+ output_attentions: Optional[bool] = None,
1216
+ output_hidden_states: Optional[bool] = None,
1217
+ return_dict: Optional[bool] = None,
1218
+ ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
1219
+ r"""
1220
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1221
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1222
+ """
1223
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1224
+
1225
+ outputs = self.distilbert(
1226
+ input_ids,
1227
+ attention_mask=attention_mask,
1228
+ head_mask=head_mask,
1229
+ inputs_embeds=inputs_embeds,
1230
+ output_attentions=output_attentions,
1231
+ output_hidden_states=output_hidden_states,
1232
+ return_dict=return_dict,
1233
+ )
1234
+
1235
+ sequence_output = outputs[0]
1236
+
1237
+ sequence_output = self.dropout(sequence_output)
1238
+ logits = self.classifier(sequence_output)
1239
+
1240
+ loss = None
1241
+ if labels is not None:
1242
+ loss_fct = CrossEntropyLoss()
1243
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1244
+
1245
+ if not return_dict:
1246
+ output = (logits,) + outputs[1:]
1247
+ return ((loss,) + output) if loss is not None else output
1248
+
1249
+ return TokenClassifierOutput(
1250
+ loss=loss,
1251
+ logits=logits,
1252
+ hidden_states=outputs.hidden_states,
1253
+ attentions=outputs.attentions,
1254
+ )
1255
+
1256
+
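+ # Usage sketch (illustrative, not part of the upstream file): per-token label ids from the
+ # classification head. With the base checkpoint the head is randomly initialized, so the
+ # predictions are only meaningful after fine-tuning; checkpoint name and `num_labels` are
+ # assumptions for the example.
+ def _example_token_classification():
+     import torch
+     from transformers import AutoTokenizer, DistilBertForTokenClassification
+
+     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+     model = DistilBertForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=9)
+     inputs = tokenizer("Hugging Face is based in New York City", return_tensors="pt")
+     with torch.no_grad():
+         logits = model(**inputs).logits  # (1, seq_length, num_labels)
+     return logits.argmax(dim=-1)  # per-token predicted label ids
+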
1257
+ @add_start_docstrings(
1258
+ """
1259
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1260
+ a softmax) e.g. for RocStories/SWAG tasks.
1261
+ """,
1262
+ DISTILBERT_START_DOCSTRING,
1263
+ )
1264
+ class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
1265
+ def __init__(self, config: PretrainedConfig):
1266
+ super().__init__(config)
1267
+
1268
+ self.distilbert = DistilBertModel(config)
1269
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
1270
+ self.classifier = nn.Linear(config.dim, 1)
1271
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
1272
+
1273
+ # Initialize weights and apply final processing
1274
+ self.post_init()
1275
+
1276
+ def get_position_embeddings(self) -> nn.Embedding:
1277
+ """
1278
+ Returns the position embeddings
1279
+ """
1280
+ return self.distilbert.get_position_embeddings()
1281
+
1282
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1283
+ """
1284
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1285
+
1286
+ Arguments:
1287
+ new_num_position_embeddings (`int`):
1288
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1289
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1290
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1291
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1292
+ will remove vectors from the end.
1293
+ """
1294
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1295
+
1296
+ @add_start_docstrings_to_model_forward(
1297
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1298
+ )
1299
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
1300
+ def forward(
1301
+ self,
1302
+ input_ids: Optional[torch.Tensor] = None,
1303
+ attention_mask: Optional[torch.Tensor] = None,
1304
+ head_mask: Optional[torch.Tensor] = None,
1305
+ inputs_embeds: Optional[torch.Tensor] = None,
1306
+ labels: Optional[torch.LongTensor] = None,
1307
+ output_attentions: Optional[bool] = None,
1308
+ output_hidden_states: Optional[bool] = None,
1309
+ return_dict: Optional[bool] = None,
1310
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
1311
+ r"""
1312
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1313
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1314
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1315
+ `input_ids` above)
1316
+
1317
+ Returns:
1318
+
1319
+ Examples:
1320
+
1321
+ ```python
1322
+ >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
1323
+ >>> import torch
1324
+
1325
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
1326
+ >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
1327
+
1328
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1329
+ >>> choice0 = "It is eaten with a fork and a knife."
1330
+ >>> choice1 = "It is eaten while held in the hand."
1331
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
1332
+
1333
+ >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
1334
+ >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
1335
+
1336
+ >>> # the linear classifier still needs to be trained
1337
+ >>> loss = outputs.loss
1338
+ >>> logits = outputs.logits
1339
+ ```"""
1340
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1341
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1342
+
1343
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1344
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1345
+ inputs_embeds = (
1346
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1347
+ if inputs_embeds is not None
1348
+ else None
1349
+ )
1350
+
1351
+ outputs = self.distilbert(
1352
+ input_ids,
1353
+ attention_mask=attention_mask,
1354
+ head_mask=head_mask,
1355
+ inputs_embeds=inputs_embeds,
1356
+ output_attentions=output_attentions,
1357
+ output_hidden_states=output_hidden_states,
1358
+ return_dict=return_dict,
1359
+ )
1360
+
1361
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
1362
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
1363
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
1364
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
1365
+ pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
1366
+ logits = self.classifier(pooled_output) # (bs * num_choices, 1)
1367
+
1368
+ reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
1369
+
1370
+ loss = None
1371
+ if labels is not None:
1372
+ loss_fct = CrossEntropyLoss()
1373
+ loss = loss_fct(reshaped_logits, labels)
1374
+
1375
+ if not return_dict:
1376
+ output = (reshaped_logits,) + outputs[1:]
1377
+ return ((loss,) + output) if loss is not None else output
1378
+
1379
+ return MultipleChoiceModelOutput(
1380
+ loss=loss,
1381
+ logits=reshaped_logits,
1382
+ hidden_states=outputs.hidden_states,
1383
+ attentions=outputs.attentions,
1384
+ )
venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py ADDED
@@ -0,0 +1,895 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import math
17
+ from typing import Callable, Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.traverse_util import flatten_dict, unflatten_dict
25
+ from jax import lax
26
+
27
+ from ...modeling_flax_outputs import (
28
+ FlaxBaseModelOutput,
29
+ FlaxMaskedLMOutput,
30
+ FlaxMultipleChoiceModelOutput,
31
+ FlaxQuestionAnsweringModelOutput,
32
+ FlaxSequenceClassifierOutput,
33
+ FlaxTokenClassifierOutput,
34
+ )
35
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_distilbert import DistilBertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
43
+ _CONFIG_FOR_DOC = "DistilBertConfig"
44
+
45
+
46
+ FLAX_DISTILBERT_START_DOCSTRING = r"""
47
+
48
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
50
+
51
+ This model is also a
52
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
53
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
54
+ behavior.
55
+
56
+ Finally, this model supports inherent JAX features such as:
57
+
58
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
59
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
60
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
61
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
62
+
63
+ Parameters:
64
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
65
+ Initializing with a config file does not load the weights associated with the model, only the
66
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
67
+ """
68
+
69
+ DISTILBERT_INPUTS_DOCSTRING = r"""
70
+ Args:
71
+ input_ids (`numpy.ndarray` of shape `({0})`):
72
+ Indices of input sequence tokens in the vocabulary.
73
+
74
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
75
+ [`PreTrainedTokenizer.__call__`] for details.
76
+
77
+ [What are input IDs?](../glossary#input-ids)
78
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
79
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
80
+
81
+ - 1 for tokens that are **not masked**,
82
+ - 0 for tokens that are **masked**.
83
+
84
+ [What are attention masks?](../glossary#attention-mask)
85
+ output_attentions (`bool`, *optional*):
86
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
87
+ tensors for more detail.
88
+ output_hidden_states (`bool`, *optional*):
89
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
90
+ more detail.
91
+ return_dict (`bool`, *optional*):
92
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
93
+ """
94
+
95
+
96
+ def get_angles(pos, i, d_model):
97
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
98
+ return pos * angle_rates
99
+
100
+
101
+ def positional_encoding(position, d_model):
102
+ # create the sinusoidal pattern for the positional encoding
103
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
104
+
105
+ # apply sin to even indices in the array; 2i
106
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
107
+
108
+ # apply cos to odd indices in the array; 2i+1
109
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
110
+
111
+ pos_encoding = angle_rads[np.newaxis, ...]
112
+
113
+ return jnp.array(pos_encoding)
114
+
115
+
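+ # Quick sanity-check sketch (illustrative only) for the helper above: the sinusoidal table
+ # has shape (1, position, d_model) and values in [-1, 1].
+ def _example_positional_encoding_shape():
+     table = positional_encoding(position=512, d_model=768)
+     return table.shape  # (1, 512, 768)
+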
116
+ class FlaxEmbeddings(nn.Module):
117
+ """Construct the embeddings from word, position and token_type embeddings."""
118
+
119
+ config: DistilBertConfig
120
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
121
+
122
+ def setup(self):
123
+ self.word_embeddings = nn.Embed(
124
+ self.config.vocab_size,
125
+ self.config.dim,
126
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
127
+ )
128
+ if not self.config.sinusoidal_pos_embds:
129
+ self.position_embeddings = nn.Embed(
130
+ self.config.max_position_embeddings,
131
+ self.config.dim,
132
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
133
+ )
134
+ else:
135
+ self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
136
+ self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
137
+ self.dropout = nn.Dropout(rate=self.config.dropout)
138
+
139
+ def __call__(self, input_ids, deterministic: bool = True):
140
+ # Embed
141
+ batch_size, seq_length = input_ids.shape
142
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
143
+ if not self.config.sinusoidal_pos_embds:
144
+ position_ids = jnp.arange(seq_length).astype("i4")
145
+ position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
146
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
147
+ else:
148
+ position_embeds = self.pos_encoding[:, :seq_length, :]
149
+ # explicitly cast the positions here, since self.embed_positions are not registered as parameters
150
+ position_embeds = position_embeds.astype(inputs_embeds.dtype)
151
+
152
+ # Sum all embeddings
153
+ hidden_states = inputs_embeds + position_embeds
154
+
155
+ # Layer Norm
156
+ hidden_states = self.LayerNorm(hidden_states)
157
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
158
+ return hidden_states
159
+
160
+
161
+ class FlaxMultiHeadSelfAttention(nn.Module):
162
+ config: DistilBertConfig
163
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
164
+
165
+ def setup(self):
166
+ self.n_heads = self.config.n_heads
167
+ self.dim = self.config.dim
168
+ self.dropout = nn.Dropout(rate=self.config.attention_dropout)
169
+
170
+ if not (self.dim % self.n_heads == 0):
171
+ raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")
172
+
173
+ self.q_lin = nn.Dense(
174
+ self.dim,
175
+ dtype=self.dtype,
176
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
177
+ )
178
+ self.k_lin = nn.Dense(
179
+ self.dim,
180
+ dtype=self.dtype,
181
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
182
+ )
183
+ self.v_lin = nn.Dense(
184
+ self.dim,
185
+ dtype=self.dtype,
186
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
187
+ )
188
+ self.out_lin = nn.Dense(
189
+ self.dim,
190
+ dtype=self.dtype,
191
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
192
+ )
193
+
194
+ def __call__(
195
+ self,
196
+ query,
197
+ key,
198
+ value,
199
+ mask,
200
+ deterministic: bool = True,
201
+ output_attentions: bool = False,
202
+ ):
203
+ bs, q_len, dim = query.shape
204
+ k_len = key.shape[1]
205
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
206
+ # assert key.size() == value.size()
207
+
208
+ dim_per_head = self.dim // self.n_heads
209
+
210
+ mask_reshp = (bs, 1, 1, k_len)
211
+
212
+ def shape(x):
213
+ """separate heads"""
214
+ return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)
215
+
216
+ def unshape(x):
217
+ """group heads"""
218
+ return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)
219
+
220
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head)
221
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head)
222
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head)
223
+
224
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head)
225
+ scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len)
226
+ mask = jnp.reshape(mask, mask_reshp)
227
+
228
+ mask = mask.astype(scores.dtype)
229
+ scores = scores - 1e30 * (1.0 - mask)
230
+
231
+ weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len)
232
+ weights = self.dropout(weights, deterministic=deterministic)
233
+
234
+ context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head)
235
+ context = unshape(context) # (bs, q_len, dim)
236
+ context = self.out_lin(context) # (bs, q_len, dim)
237
+
238
+ if output_attentions:
239
+ return (context, weights)
240
+ else:
241
+ return (context,)
242
+
243
+
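+ # Sketch (illustrative, not part of the upstream file): initialize the attention module
+ # above on dummy inputs and check the output shape. The tiny config values are assumptions
+ # made purely for the example.
+ def _example_flax_self_attention_shapes():
+     import jax
+     import jax.numpy as jnp
+
+     config = DistilBertConfig(dim=32, n_heads=4)
+     module = FlaxMultiHeadSelfAttention(config)
+     hidden = jnp.ones((2, 5, 32))  # (bs, seq_len, dim)
+     mask = jnp.ones((2, 5))  # all tokens are real (no padding)
+     params = module.init(jax.random.PRNGKey(0), hidden, hidden, hidden, mask)
+     (context,) = module.apply(params, hidden, hidden, hidden, mask)
+     return context.shape  # (2, 5, 32)
+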
244
+ class FlaxFFN(nn.Module):
245
+ config: DistilBertConfig
246
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
247
+
248
+ def setup(self):
249
+ self.dropout = nn.Dropout(rate=self.config.dropout)
250
+ self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
251
+ self.seq_len_dim = 1
252
+ self.lin1 = nn.Dense(
253
+ self.config.hidden_dim,
254
+ dtype=self.dtype,
255
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
256
+ )
257
+ self.lin2 = nn.Dense(
258
+ self.config.dim,
259
+ dtype=self.dtype,
260
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
261
+ )
262
+
263
+ self.activation = ACT2FN[self.config.activation]
264
+
265
+ def __call__(self, hidden_states, deterministic: bool = True):
266
+ hidden_states = self.lin1(hidden_states)
267
+ hidden_states = self.activation(hidden_states)
268
+ hidden_states = self.lin2(hidden_states)
269
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
270
+ return hidden_states
271
+
272
+
273
+ class FlaxTransformerBlock(nn.Module):
274
+ config: DistilBertConfig
275
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
276
+
277
+ def setup(self):
278
+ assert (
279
+ self.config.dim % self.config.n_heads == 0
280
+ ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}"
281
+
282
+ self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
283
+ self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
284
+
285
+ self.ffn = FlaxFFN(self.config, dtype=self.dtype)
286
+ self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
287
+
288
+ def __call__(
289
+ self,
290
+ hidden_states,
291
+ attn_mask,
292
+ output_attentions: bool = False,
293
+ deterministic: bool = True,
294
+ ):
295
+ # Self-Attention
296
+ sa_output = self.attention(
297
+ query=hidden_states,
298
+ key=hidden_states,
299
+ value=hidden_states,
300
+ mask=attn_mask,
301
+ output_attentions=output_attentions,
302
+ deterministic=deterministic,
303
+ )
304
+ if output_attentions:
305
+ sa_output, sa_weights = sa_output
306
+ else:
307
+ assert type(sa_output) == tuple
308
+ sa_output = sa_output[0]
309
+ sa_output = self.sa_layer_norm(sa_output + hidden_states)
310
+
311
+ # Feed Forward Network
312
+ ffn_output = self.ffn(sa_output, deterministic=deterministic)
313
+ ffn_output = self.output_layer_norm(ffn_output + sa_output)
314
+ output = (ffn_output,)
315
+ if output_attentions:
316
+ output = (sa_weights,) + output
317
+ return output
318
+
319
+
320
+ class FlaxTransformer(nn.Module):
321
+ config: DistilBertConfig
322
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
323
+
324
+ def setup(self):
325
+ self.layers = [
326
+ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
327
+ ]
328
+
329
+ def __call__(
330
+ self,
331
+ hidden_states,
332
+ attention_mask,
333
+ output_attentions: bool = False,
334
+ output_hidden_states: bool = False,
335
+ deterministic: bool = True,
336
+ return_dict: bool = False,
337
+ ):
338
+ all_hidden_states = () if output_hidden_states else None
339
+ all_attentions = () if output_attentions else None
340
+
341
+ for layer_module in self.layers:
342
+ if output_hidden_states:
343
+ all_hidden_states = all_hidden_states + (hidden_states,)
344
+
345
+ layer_outputs = layer_module(
346
+ hidden_states=hidden_states,
347
+ attn_mask=attention_mask,
348
+ output_attentions=output_attentions,
349
+ deterministic=deterministic,
350
+ )
351
+ hidden_states = layer_outputs[-1]
352
+
353
+ if output_attentions:
354
+ assert len(layer_outputs) == 2
355
+ attentions = layer_outputs[0]
356
+ all_attentions = all_attentions + (attentions,)
357
+ else:
358
+ assert len(layer_outputs) == 1
359
+
360
+ # Add last layer
361
+ if output_hidden_states:
362
+ all_hidden_states = all_hidden_states + (hidden_states,)
363
+
364
+ if not return_dict:
365
+ return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
366
+ return FlaxBaseModelOutput(
367
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
368
+ )
369
+
370
+
371
+ class FlaxTransformerEncoder(nn.Module):
372
+ config: DistilBertConfig
373
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
374
+
375
+ def setup(self):
376
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
377
+
378
+ def __call__(
379
+ self,
380
+ hidden_states,
381
+ attention_mask,
382
+ output_attentions: bool = False,
383
+ output_hidden_states: bool = False,
384
+ deterministic: bool = True,
385
+ return_dict: bool = False,
386
+ ):
387
+ return self.layer(
388
+ hidden_states=hidden_states,
389
+ attention_mask=attention_mask,
390
+ output_attentions=output_attentions,
391
+ output_hidden_states=output_hidden_states,
392
+ deterministic=deterministic,
393
+ return_dict=return_dict,
394
+ )
395
+
396
+
397
+ class FlaxDistilBertLMDecoder(nn.Module):
398
+ config: DistilBertConfig
399
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
400
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
401
+
402
+ def setup(self):
403
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
404
+
405
+ def __call__(self, inputs, kernel):
406
+ inputs = jnp.asarray(inputs, self.dtype)
407
+ kernel = jnp.asarray(kernel, self.dtype)
408
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
409
+ bias = jnp.asarray(self.bias, self.dtype)
410
+ y = y + bias
411
+ return y
412
+
413
+
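# A short sketch of what FlaxDistilBertLMDecoder computes: `lax.dot_general` with the
# dimension numbers above contracts the last axis of `inputs` with the first axis of
# `kernel`, i.e. it is `inputs @ kernel` followed by the learned bias. The kernel passed
# in by the masked-LM head further down is the transposed word-embedding matrix, which
# ties the output projection to the input embeddings. Shapes below are illustrative.
import jax.numpy as jnp
from jax import lax

inputs = jnp.ones((2, 4, 8))   # (batch_size, seq_len, dim)
kernel = jnp.ones((8, 30522))  # (dim, vocab_size), e.g. word_embeddings.T
y_dot_general = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
y_matmul = jnp.matmul(inputs, kernel)
assert jnp.allclose(y_dot_general, y_matmul)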
414
+ class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
415
+ """
416
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
417
+ models.
418
+ """
419
+
420
+ config_class = DistilBertConfig
421
+ base_model_prefix = "distilbert"
422
+ module_class: nn.Module = None
423
+
424
+ def __init__(
425
+ self,
426
+ config: DistilBertConfig,
427
+ input_shape: Tuple = (1, 1),
428
+ seed: int = 0,
429
+ dtype: jnp.dtype = jnp.float32,
430
+ _do_init: bool = True,
431
+ **kwargs,
432
+ ):
433
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
434
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
435
+
436
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
437
+ # init input tensors
438
+ input_ids = jnp.zeros(input_shape, dtype="i4")
439
+ attention_mask = jnp.ones_like(input_ids)
440
+
441
+ params_rng, dropout_rng = jax.random.split(rng)
442
+ rngs = {"params": params_rng, "dropout": dropout_rng}
443
+
444
+ random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
445
+
446
+ if params is not None:
447
+ random_params = flatten_dict(unfreeze(random_params))
448
+ params = flatten_dict(unfreeze(params))
449
+ for missing_key in self._missing_keys:
450
+ params[missing_key] = random_params[missing_key]
451
+ self._missing_keys = set()
452
+ return freeze(unflatten_dict(params))
453
+ else:
454
+ return random_params
455
+
456
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
457
+ def __call__(
458
+ self,
459
+ input_ids,
460
+ attention_mask=None,
461
+ head_mask=None,
462
+ params: dict = None,
463
+ dropout_rng: jax.random.PRNGKey = None,
464
+ train: bool = False,
465
+ output_attentions: Optional[bool] = None,
466
+ output_hidden_states: Optional[bool] = None,
467
+ return_dict: Optional[bool] = None,
468
+ ):
469
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
470
+ output_hidden_states = (
471
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
472
+ )
473
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
474
+
475
+ if attention_mask is None:
476
+ attention_mask = jnp.ones_like(input_ids)
477
+
478
+ # Handle any PRNG if needed
479
+ rngs = {}
480
+ if dropout_rng is not None:
481
+ rngs["dropout"] = dropout_rng
482
+
483
+ return self.module.apply(
484
+ {"params": params or self.params},
485
+ jnp.array(input_ids, dtype="i4"),
486
+ jnp.array(attention_mask, dtype="i4"),
487
+ not train,
488
+ output_attentions,
489
+ output_hidden_states,
490
+ return_dict,
491
+ rngs=rngs,
492
+ )
493
+
494
+
495
+ class FlaxDistilBertModule(nn.Module):
496
+ config: DistilBertConfig
497
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
498
+
499
+ def setup(self):
500
+ self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
501
+ self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)
502
+
503
+ def __call__(
504
+ self,
505
+ input_ids,
506
+ attention_mask,
507
+ deterministic: bool = True,
508
+ output_attentions: bool = False,
509
+ output_hidden_states: bool = False,
510
+ return_dict: bool = True,
511
+ ):
512
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
513
+ output_hidden_states = (
514
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
515
+ )
516
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
517
+
518
+ input_embeds = self.embeddings(input_ids, deterministic=deterministic)
519
+ return self.transformer(
520
+ hidden_states=input_embeds,
521
+ attention_mask=attention_mask,
522
+ deterministic=deterministic,
523
+ output_attentions=output_attentions,
524
+ output_hidden_states=output_hidden_states,
525
+ return_dict=return_dict,
526
+ )
527
+
528
+
529
+ @add_start_docstrings(
530
+ "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
531
+ FLAX_DISTILBERT_START_DOCSTRING,
532
+ )
533
+ class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
534
+ module_class = FlaxDistilBertModule
535
+
536
+
537
+ append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
538
+
539
+
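# A minimal usage sketch for the bare Flax model, assuming the public
# "distilbert-base-uncased" checkpoint and a working Flax install.
from transformers import AutoTokenizer, FlaxDistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (batch_size, seq_len, config.dim)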
540
+ class FlaxDistilBertForMaskedLMModule(nn.Module):
541
+ config: DistilBertConfig
542
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
543
+
544
+ def setup(self):
545
+ self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
546
+ self.vocab_transform = nn.Dense(
547
+ self.config.dim,
548
+ dtype=self.dtype,
549
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
550
+ )
551
+ self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
552
+ if self.config.tie_word_embeddings:
553
+ self.vocab_projector = FlaxDistilBertLMDecoder(
554
+ self.config,
555
+ dtype=self.dtype,
556
+ )
557
+ else:
558
+ self.vocab_projector = nn.Dense(
559
+ self.config.vocab_size,
560
+ dtype=self.dtype,
561
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
562
+ )
563
+
564
+ def __call__(
565
+ self,
566
+ input_ids,
567
+ attention_mask,
568
+ deterministic: bool = True,
569
+ output_attentions: bool = False,
570
+ output_hidden_states: bool = False,
571
+ return_dict: bool = True,
572
+ ):
573
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
574
+
575
+ dlbrt_output = self.distilbert(
576
+ input_ids=input_ids,
577
+ attention_mask=attention_mask,
578
+ output_attentions=output_attentions,
579
+ output_hidden_states=output_hidden_states,
580
+ deterministic=deterministic,
581
+ return_dict=return_dict,
582
+ )
583
+ hidden_states = dlbrt_output[0]
584
+ prediction_logits = self.vocab_transform(hidden_states)
585
+ prediction_logits = ACT2FN[self.config.activation](prediction_logits)
586
+ prediction_logits = self.vocab_layer_norm(prediction_logits)
587
+
588
+ if self.config.tie_word_embeddings:
589
+ shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
590
+ prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
591
+ else:
592
+ prediction_logits = self.vocab_projector(prediction_logits)
593
+
594
+ if not return_dict:
595
+ output = (prediction_logits,) + dlbrt_output[1:]
596
+ return output
597
+
598
+ return FlaxMaskedLMOutput(
599
+ logits=prediction_logits,
600
+ hidden_states=dlbrt_output.hidden_states,
601
+ attentions=dlbrt_output.attentions,
602
+ )
603
+
604
+
605
+ @add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
606
+ class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
607
+ module_class = FlaxDistilBertForMaskedLMModule
608
+
609
+
610
+ append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
611
+
612
+
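# A minimal masked-LM sketch, again assuming the "distilbert-base-uncased" checkpoint.
# It takes the highest-scoring token at the [MASK] position; the prediction in the
# final comment is the likely outcome, not a guaranteed one.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxDistilBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
logits = model(**inputs).logits

mask_position = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
predicted_id = int(jnp.argmax(logits[0, mask_position]))
print(tokenizer.decode([predicted_id]))  # likely "paris"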
613
+ class FlaxDistilBertForSequenceClassificationModule(nn.Module):
614
+ config: DistilBertConfig
615
+ dtype: jnp.dtype = jnp.float32
616
+
617
+ def setup(self):
618
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
619
+ self.pre_classifier = nn.Dense(
620
+ self.config.dim,
621
+ dtype=self.dtype,
622
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
623
+ )
624
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
625
+ self.classifier = nn.Dense(
626
+ self.config.num_labels,
627
+ dtype=self.dtype,
628
+ )
629
+
630
+ def __call__(
631
+ self,
632
+ input_ids,
633
+ attention_mask,
634
+ deterministic: bool = True,
635
+ output_attentions: bool = False,
636
+ output_hidden_states: bool = False,
637
+ return_dict: bool = True,
638
+ ):
639
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
640
+ # Model
641
+ distilbert_output = self.distilbert(
642
+ input_ids,
643
+ attention_mask,
644
+ deterministic=deterministic,
645
+ output_attentions=output_attentions,
646
+ output_hidden_states=output_hidden_states,
647
+ return_dict=return_dict,
648
+ )
649
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
650
+ pooled_output = hidden_state[:, 0] # (bs, dim)
651
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
652
+ pooled_output = ACT2FN["relu"](pooled_output)
653
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
654
+ logits = self.classifier(pooled_output) # (bs, dim)
655
+
656
+ if not return_dict:
657
+ return (logits,) + distilbert_output[1:]
658
+
659
+ return FlaxSequenceClassifierOutput(
660
+ logits=logits,
661
+ hidden_states=distilbert_output.hidden_states,
662
+ attentions=distilbert_output.attentions,
663
+ )
664
+
665
+
666
+ @add_start_docstrings(
667
+ """
668
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
669
+ pooled output) e.g. for GLUE tasks.
670
+ """,
671
+ FLAX_DISTILBERT_START_DOCSTRING,
672
+ )
673
+ class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
674
+ module_class = FlaxDistilBertForSequenceClassificationModule
675
+
676
+
677
+ append_call_sample_docstring(
678
+ FlaxDistilBertForSequenceClassification,
679
+ _CHECKPOINT_FOR_DOC,
680
+ FlaxSequenceClassifierOutput,
681
+ _CONFIG_FOR_DOC,
682
+ )
683
+
684
+
685
+ class FlaxDistilBertForMultipleChoiceModule(nn.Module):
686
+ config: DistilBertConfig
687
+ dtype: jnp.dtype = jnp.float32
688
+
689
+ def setup(self):
690
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
691
+ self.pre_classifier = nn.Dense(
692
+ self.config.dim,
693
+ dtype=self.dtype,
694
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
695
+ )
696
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
697
+ self.classifier = nn.Dense(
698
+ 1,
699
+ dtype=self.dtype,
700
+ )
701
+
702
+ def __call__(
703
+ self,
704
+ input_ids,
705
+ attention_mask,
706
+ deterministic: bool = True,
707
+ output_attentions: bool = False,
708
+ output_hidden_states: bool = False,
709
+ return_dict: bool = True,
710
+ ):
711
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
712
+ num_choices = input_ids.shape[1]
713
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
714
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
715
+
716
+ # Model
717
+ outputs = self.distilbert(
718
+ input_ids,
719
+ attention_mask,
720
+ deterministic=deterministic,
721
+ output_attentions=output_attentions,
722
+ output_hidden_states=output_hidden_states,
723
+ return_dict=return_dict,
724
+ )
725
+
726
+ hidden_state = outputs[0]
727
+ pooled_output = hidden_state[:, 0]
728
+ pooled_output = self.pre_classifier(pooled_output)
729
+ pooled_output = ACT2FN["relu"](pooled_output)
730
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
731
+ logits = self.classifier(pooled_output)
732
+
733
+ reshaped_logits = logits.reshape(-1, num_choices)
734
+
735
+ if not return_dict:
736
+ return (reshaped_logits,) + outputs[2:]
737
+
738
+ return FlaxMultipleChoiceModelOutput(
739
+ logits=reshaped_logits,
740
+ hidden_states=outputs.hidden_states,
741
+ attentions=outputs.attentions,
742
+ )
743
+
744
+
745
+ @add_start_docstrings(
746
+ """
747
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
748
+ a softmax) e.g. for RocStories/SWAG tasks.
749
+ """,
750
+ FLAX_DISTILBERT_START_DOCSTRING,
751
+ )
752
+ class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
753
+ module_class = FlaxDistilBertForMultipleChoiceModule
754
+
755
+
756
+ overwrite_call_docstring(
757
+ FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
758
+ )
759
+ append_call_sample_docstring(
760
+ FlaxDistilBertForMultipleChoice,
761
+ _CHECKPOINT_FOR_DOC,
762
+ FlaxMultipleChoiceModelOutput,
763
+ _CONFIG_FOR_DOC,
764
+ )
765
+
766
+
767
+ class FlaxDistilBertForTokenClassificationModule(nn.Module):
768
+ config: DistilBertConfig
769
+ dtype: jnp.dtype = jnp.float32
770
+
771
+ def setup(self):
772
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
773
+ self.dropout = nn.Dropout(rate=self.config.dropout)
774
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
775
+
776
+ def __call__(
777
+ self,
778
+ input_ids,
779
+ attention_mask,
780
+ deterministic: bool = True,
781
+ output_attentions: bool = False,
782
+ output_hidden_states: bool = False,
783
+ return_dict: bool = True,
784
+ ):
785
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
786
+ # Model
787
+ outputs = self.distilbert(
788
+ input_ids,
789
+ attention_mask,
790
+ deterministic=deterministic,
791
+ output_attentions=output_attentions,
792
+ output_hidden_states=output_hidden_states,
793
+ return_dict=return_dict,
794
+ )
795
+
796
+ hidden_states = outputs[0]
797
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
798
+ logits = self.classifier(hidden_states)
799
+
800
+ if not return_dict:
801
+ return (logits,) + outputs[1:]
802
+
803
+ return FlaxTokenClassifierOutput(
804
+ logits=logits,
805
+ hidden_states=outputs.hidden_states,
806
+ attentions=outputs.attentions,
807
+ )
808
+
809
+
810
+ @add_start_docstrings(
811
+ """
812
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
813
+ for Named-Entity-Recognition (NER) tasks.
814
+ """,
815
+ FLAX_DISTILBERT_START_DOCSTRING,
816
+ )
817
+ class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
818
+ module_class = FlaxDistilBertForTokenClassificationModule
819
+
820
+
821
+ append_call_sample_docstring(
822
+ FlaxDistilBertForTokenClassification,
823
+ _CHECKPOINT_FOR_DOC,
824
+ FlaxTokenClassifierOutput,
825
+ _CONFIG_FOR_DOC,
826
+ )
827
+
828
+
829
+ class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
830
+ config: DistilBertConfig
831
+ dtype: jnp.dtype = jnp.float32
832
+
833
+ def setup(self):
834
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
835
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
836
+ assert self.config.num_labels == 2
837
+ self.dropout = nn.Dropout(rate=self.config.qa_dropout)
838
+
839
+ def __call__(
840
+ self,
841
+ input_ids,
842
+ attention_mask,
843
+ deterministic: bool = True,
844
+ output_attentions: bool = False,
845
+ output_hidden_states: bool = False,
846
+ return_dict: bool = True,
847
+ ):
848
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
849
+
850
+ # Model
851
+ distilbert_output = self.distilbert(
852
+ input_ids,
853
+ attention_mask,
854
+ deterministic=deterministic,
855
+ output_attentions=output_attentions,
856
+ output_hidden_states=output_hidden_states,
857
+ return_dict=return_dict,
858
+ )
859
+
860
+ hidden_states = distilbert_output[0]
861
+
862
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
863
+ logits = self.qa_outputs(hidden_states)
864
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
865
+ start_logits = start_logits.squeeze(-1)
866
+ end_logits = end_logits.squeeze(-1)
867
+
868
+ if not return_dict:
869
+ return (start_logits, end_logits) + distilbert_output[1:]
870
+
871
+ return FlaxQuestionAnsweringModelOutput(
872
+ start_logits=start_logits,
873
+ end_logits=end_logits,
874
+ hidden_states=distilbert_output.hidden_states,
875
+ attentions=distilbert_output.attentions,
876
+ )
877
+
878
+
879
+ @add_start_docstrings(
880
+ """
881
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
882
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
883
+ """,
884
+ FLAX_DISTILBERT_START_DOCSTRING,
885
+ )
886
+ class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
887
+ module_class = FlaxDistilBertForQuestionAnsweringModule
888
+
889
+
890
+ append_call_sample_docstring(
891
+ FlaxDistilBertForQuestionAnswering,
892
+ _CHECKPOINT_FOR_DOC,
893
+ FlaxQuestionAnsweringModelOutput,
894
+ _CONFIG_FOR_DOC,
895
+ )
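# A minimal extractive-QA sketch for the head above. It assumes a DistilBERT checkpoint
# fine-tuned on SQuAD such as "distilbert-base-uncased-distilled-squad"; if that
# checkpoint only ships PyTorch weights, `from_pt=True` converts them on the fly.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxDistilBertForQuestionAnswering

ckpt = "distilbert-base-uncased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = FlaxDistilBertForQuestionAnswering.from_pretrained(ckpt, from_pt=True)

question = "What is DistilBERT?"
context = "DistilBERT is a smaller, faster Transformer model obtained by distilling BERT base."
inputs = tokenizer(question, context, return_tensors="np")
outputs = model(**inputs)

start = int(jnp.argmax(outputs.start_logits, axis=-1)[0])
end = int(jnp.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))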
venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py ADDED
@@ -0,0 +1,1139 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ TF 2.0 DistilBERT model
17
+ """
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFMaskedLMOutput,
32
+ TFMultipleChoiceModelOutput,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutput,
35
+ TFTokenClassifierOutput,
36
+ )
37
+ from ...modeling_tf_utils import (
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFTokenClassificationLoss,
45
+ get_initializer,
46
+ keras,
47
+ keras_serializable,
48
+ unpack_inputs,
49
+ )
50
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
51
+ from ...utils import (
52
+ add_code_sample_docstrings,
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ logging,
56
+ )
57
+ from .configuration_distilbert import DistilBertConfig
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
63
+ _CONFIG_FOR_DOC = "DistilBertConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ class TFEmbeddings(keras.layers.Layer):
70
+ """Construct the embeddings from word, position and token_type embeddings."""
71
+
72
+ def __init__(self, config, **kwargs):
73
+ super().__init__(**kwargs)
74
+ self.config = config
75
+ self.dim = config.dim
76
+ self.initializer_range = config.initializer_range
77
+ self.max_position_embeddings = config.max_position_embeddings
78
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
79
+ self.dropout = keras.layers.Dropout(rate=config.dropout)
80
+
81
+ def build(self, input_shape=None):
82
+ with tf.name_scope("word_embeddings"):
83
+ self.weight = self.add_weight(
84
+ name="weight",
85
+ shape=[self.config.vocab_size, self.dim],
86
+ initializer=get_initializer(initializer_range=self.initializer_range),
87
+ )
88
+
89
+ with tf.name_scope("position_embeddings"):
90
+ self.position_embeddings = self.add_weight(
91
+ name="embeddings",
92
+ shape=[self.max_position_embeddings, self.dim],
93
+ initializer=get_initializer(initializer_range=self.initializer_range),
94
+ )
95
+
96
+ if self.built:
97
+ return
98
+ self.built = True
99
+ if getattr(self, "LayerNorm", None) is not None:
100
+ with tf.name_scope(self.LayerNorm.name):
101
+ self.LayerNorm.build([None, None, self.config.dim])
102
+
103
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
104
+ """
105
+ Applies embedding based on inputs tensor.
106
+
107
+ Returns:
108
+ final_embeddings (`tf.Tensor`): output embedding tensor.
109
+ """
110
+ assert not (input_ids is None and inputs_embeds is None)
111
+
112
+ if input_ids is not None:
113
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
114
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
115
+
116
+ input_shape = shape_list(inputs_embeds)[:-1]
117
+
118
+ if position_ids is None:
119
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
120
+
121
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
122
+ final_embeddings = inputs_embeds + position_embeds
123
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
124
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
125
+
126
+ return final_embeddings
127
+
128
+
129
+ class TFMultiHeadSelfAttention(keras.layers.Layer):
130
+ def __init__(self, config, **kwargs):
131
+ super().__init__(**kwargs)
132
+
133
+ self.n_heads = config.n_heads
134
+ self.dim = config.dim
135
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
136
+ self.output_attentions = config.output_attentions
137
+
138
+ assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}"
139
+
140
+ self.q_lin = keras.layers.Dense(
141
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
142
+ )
143
+ self.k_lin = keras.layers.Dense(
144
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
145
+ )
146
+ self.v_lin = keras.layers.Dense(
147
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
148
+ )
149
+ self.out_lin = keras.layers.Dense(
150
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
151
+ )
152
+
153
+ self.pruned_heads = set()
154
+ self.config = config
155
+
156
+ def prune_heads(self, heads):
157
+ raise NotImplementedError
158
+
159
+ def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
160
+ """
161
+ Parameters:
162
+ query: tf.Tensor(bs, seq_length, dim)
163
+ key: tf.Tensor(bs, seq_length, dim)
164
+ value: tf.Tensor(bs, seq_length, dim)
165
+ mask: tf.Tensor(bs, seq_length)
166
+
167
+ Returns:
168
+ weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only if `output_attentions=True`.
169
+ context: tf.Tensor(bs, seq_length, dim) Contextualized layer.
170
+ """
171
+ bs, q_length, dim = shape_list(query)
172
+ k_length = shape_list(key)[1]
173
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
174
+ # assert key.size() == value.size()
175
+ dim_per_head = int(self.dim / self.n_heads)
176
+ dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
177
+ mask_reshape = [bs, 1, 1, k_length]
178
+
179
+ def shape(x):
180
+ """separate heads"""
181
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
182
+
183
+ def unshape(x):
184
+ """group heads"""
185
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
186
+
187
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
188
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
189
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
190
+ q = tf.cast(q, dtype=tf.float32)
191
+ q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
192
+ k = tf.cast(k, dtype=q.dtype)
193
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
194
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
195
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
196
+
197
+ mask = tf.cast(mask, dtype=scores.dtype)
198
+ scores = scores - 1e30 * (1.0 - mask)
199
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
200
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
201
+
202
+ # Mask heads if we want to
203
+ if head_mask is not None:
204
+ weights = weights * head_mask
205
+
206
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
207
+ context = unshape(context) # (bs, q_length, dim)
208
+ context = self.out_lin(context) # (bs, q_length, dim)
209
+
210
+ if output_attentions:
211
+ return (context, weights)
212
+ else:
213
+ return (context,)
214
+
215
+ def build(self, input_shape=None):
216
+ if self.built:
217
+ return
218
+ self.built = True
219
+ if getattr(self, "q_lin", None) is not None:
220
+ with tf.name_scope(self.q_lin.name):
221
+ self.q_lin.build([None, None, self.config.dim])
222
+ if getattr(self, "k_lin", None) is not None:
223
+ with tf.name_scope(self.k_lin.name):
224
+ self.k_lin.build([None, None, self.config.dim])
225
+ if getattr(self, "v_lin", None) is not None:
226
+ with tf.name_scope(self.v_lin.name):
227
+ self.v_lin.build([None, None, self.config.dim])
228
+ if getattr(self, "out_lin", None) is not None:
229
+ with tf.name_scope(self.out_lin.name):
230
+ self.out_lin.build([None, None, self.config.dim])
231
+
232
+
233
+ class TFFFN(keras.layers.Layer):
234
+ def __init__(self, config, **kwargs):
235
+ super().__init__(**kwargs)
236
+ self.dropout = keras.layers.Dropout(config.dropout)
237
+ self.lin1 = keras.layers.Dense(
238
+ config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
239
+ )
240
+ self.lin2 = keras.layers.Dense(
241
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
242
+ )
243
+ self.activation = get_tf_activation(config.activation)
244
+ self.config = config
245
+
246
+ def call(self, input, training=False):
247
+ x = self.lin1(input)
248
+ x = self.activation(x)
249
+ x = self.lin2(x)
250
+ x = self.dropout(x, training=training)
251
+ return x
252
+
253
+ def build(self, input_shape=None):
254
+ if self.built:
255
+ return
256
+ self.built = True
257
+ if getattr(self, "lin1", None) is not None:
258
+ with tf.name_scope(self.lin1.name):
259
+ self.lin1.build([None, None, self.config.dim])
260
+ if getattr(self, "lin2", None) is not None:
261
+ with tf.name_scope(self.lin2.name):
262
+ self.lin2.build([None, None, self.config.hidden_dim])
263
+
264
+
265
+ class TFTransformerBlock(keras.layers.Layer):
266
+ def __init__(self, config, **kwargs):
267
+ super().__init__(**kwargs)
268
+
269
+ self.n_heads = config.n_heads
270
+ self.dim = config.dim
271
+ self.hidden_dim = config.hidden_dim
272
+ self.dropout = keras.layers.Dropout(config.dropout)
273
+ self.activation = config.activation
274
+ self.output_attentions = config.output_attentions
275
+
276
+ assert (
277
+ config.dim % config.n_heads == 0
278
+ ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
279
+
280
+ self.attention = TFMultiHeadSelfAttention(config, name="attention")
281
+ self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
282
+
283
+ self.ffn = TFFFN(config, name="ffn")
284
+ self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
285
+ self.config = config
286
+
287
+ def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None
288
+ """
289
+ Parameters:
290
+ x: tf.Tensor(bs, seq_length, dim)
291
+ attn_mask: tf.Tensor(bs, seq_length)
292
+
293
+ Outputs: sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights.
294
+ ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
295
+ """
296
+ # Self-Attention
297
+ sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
298
+ if output_attentions:
299
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
300
+ else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
301
+ # assert type(sa_output) == tuple
302
+ sa_output = sa_output[0]
303
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
304
+
305
+ # Feed Forward Network
306
+ ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
307
+ ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
308
+
309
+ output = (ffn_output,)
310
+ if output_attentions:
311
+ output = (sa_weights,) + output
312
+ return output
313
+
314
+ def build(self, input_shape=None):
315
+ if self.built:
316
+ return
317
+ self.built = True
318
+ if getattr(self, "attention", None) is not None:
319
+ with tf.name_scope(self.attention.name):
320
+ self.attention.build(None)
321
+ if getattr(self, "sa_layer_norm", None) is not None:
322
+ with tf.name_scope(self.sa_layer_norm.name):
323
+ self.sa_layer_norm.build([None, None, self.config.dim])
324
+ if getattr(self, "ffn", None) is not None:
325
+ with tf.name_scope(self.ffn.name):
326
+ self.ffn.build(None)
327
+ if getattr(self, "output_layer_norm", None) is not None:
328
+ with tf.name_scope(self.output_layer_norm.name):
329
+ self.output_layer_norm.build([None, None, self.config.dim])
330
+
331
+
332
+ class TFTransformer(keras.layers.Layer):
333
+ def __init__(self, config, **kwargs):
334
+ super().__init__(**kwargs)
335
+ self.n_layers = config.n_layers
336
+ self.output_hidden_states = config.output_hidden_states
337
+ self.output_attentions = config.output_attentions
338
+
339
+ self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]
340
+
341
+ def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
342
+ # docstyle-ignore
343
+ """
344
+ Parameters:
345
+ x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
346
+ attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
347
+
348
+ Returns:
349
+ hidden_state: tf.Tensor(bs, seq_length, dim)
350
+ Sequence of hidden states in the last (top) layer
351
+ all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
352
+ Tuple of length n_layers with the hidden states from each layer.
353
+ Optional: only if output_hidden_states=True
354
+ all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
355
+ Tuple of length n_layers with the attention weights from each layer
356
+ Optional: only if output_attentions=True
357
+ """
358
+ all_hidden_states = () if output_hidden_states else None
359
+ all_attentions = () if output_attentions else None
360
+
361
+ hidden_state = x
362
+ for i, layer_module in enumerate(self.layer):
363
+ if output_hidden_states:
364
+ all_hidden_states = all_hidden_states + (hidden_state,)
365
+
366
+ layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
367
+ hidden_state = layer_outputs[-1]
368
+
369
+ if output_attentions:
370
+ assert len(layer_outputs) == 2
371
+ attentions = layer_outputs[0]
372
+ all_attentions = all_attentions + (attentions,)
373
+ else:
374
+ assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
375
+
376
+ # Add last layer
377
+ if output_hidden_states:
378
+ all_hidden_states = all_hidden_states + (hidden_state,)
379
+
380
+ if not return_dict:
381
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
382
+ return TFBaseModelOutput(
383
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
384
+ )
385
+
386
+ def build(self, input_shape=None):
387
+ if self.built:
388
+ return
389
+ self.built = True
390
+ if getattr(self, "layer", None) is not None:
391
+ for layer in self.layer:
392
+ with tf.name_scope(layer.name):
393
+ layer.build(None)
394
+
395
+
396
+ @keras_serializable
397
+ class TFDistilBertMainLayer(keras.layers.Layer):
398
+ config_class = DistilBertConfig
399
+
400
+ def __init__(self, config, **kwargs):
401
+ super().__init__(**kwargs)
402
+
403
+ self.config = config
404
+ self.num_hidden_layers = config.num_hidden_layers
405
+ self.output_attentions = config.output_attentions
406
+ self.output_hidden_states = config.output_hidden_states
407
+ self.return_dict = config.use_return_dict
408
+
409
+ self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
410
+ self.transformer = TFTransformer(config, name="transformer") # Encoder
411
+
412
+ def get_input_embeddings(self):
413
+ return self.embeddings
414
+
415
+ def set_input_embeddings(self, value):
416
+ self.embeddings.weight = value
417
+ self.embeddings.vocab_size = value.shape[0]
418
+
419
+ def _prune_heads(self, heads_to_prune):
420
+ raise NotImplementedError
421
+
422
+ @unpack_inputs
423
+ def call(
424
+ self,
425
+ input_ids=None,
426
+ attention_mask=None,
427
+ head_mask=None,
428
+ inputs_embeds=None,
429
+ output_attentions=None,
430
+ output_hidden_states=None,
431
+ return_dict=None,
432
+ training=False,
433
+ ):
434
+ if input_ids is not None and inputs_embeds is not None:
435
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
436
+ elif input_ids is not None:
437
+ input_shape = shape_list(input_ids)
438
+ elif inputs_embeds is not None:
439
+ input_shape = shape_list(inputs_embeds)[:-1]
440
+ else:
441
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
442
+
443
+ if attention_mask is None:
444
+ attention_mask = tf.ones(input_shape) # (bs, seq_length)
445
+
446
+ attention_mask = tf.cast(attention_mask, dtype=tf.float32)
447
+
448
+ # Prepare head mask if needed
449
+ # 1.0 in head_mask indicate we keep the head
450
+ # attention_probs has shape bsz x n_heads x N x N
451
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
452
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
453
+ if head_mask is not None:
454
+ raise NotImplementedError
455
+ else:
456
+ head_mask = [None] * self.num_hidden_layers
457
+
458
+ embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim)
459
+ tfmr_output = self.transformer(
460
+ embedding_output,
461
+ attention_mask,
462
+ head_mask,
463
+ output_attentions,
464
+ output_hidden_states,
465
+ return_dict,
466
+ training=training,
467
+ )
468
+
469
+ return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
470
+
471
+ def build(self, input_shape=None):
472
+ if self.built:
473
+ return
474
+ self.built = True
475
+ if getattr(self, "embeddings", None) is not None:
476
+ with tf.name_scope(self.embeddings.name):
477
+ self.embeddings.build(None)
478
+ if getattr(self, "transformer", None) is not None:
479
+ with tf.name_scope(self.transformer.name):
480
+ self.transformer.build(None)
481
+
482
+
483
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
484
+ class TFDistilBertPreTrainedModel(TFPreTrainedModel):
485
+ """
486
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
487
+ models.
488
+ """
489
+
490
+ config_class = DistilBertConfig
491
+ base_model_prefix = "distilbert"
492
+
493
+
494
+ DISTILBERT_START_DOCSTRING = r"""
495
+
496
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
497
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
498
+ etc.)
499
+
500
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
501
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
502
+ behavior.
503
+
504
+ <Tip>
505
+
506
+ TensorFlow models and layers in `transformers` accept two formats as input:
507
+
508
+ - having all inputs as keyword arguments (like PyTorch models), or
509
+ - having all inputs as a list, tuple or dict in the first positional argument.
510
+
511
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
512
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
513
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
514
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
515
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
516
+ positional argument:
517
+
518
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
519
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
520
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
521
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
522
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
523
+
524
+ Note that when creating models and layers with
525
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
526
+ about any of this, as you can just pass inputs like you would to any other Python function!
527
+
528
+ </Tip>
529
+
530
+ Parameters:
531
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
532
+ Initializing with a config file does not load the weights associated with the model, only the
533
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
534
+ """
535
+
536
+ DISTILBERT_INPUTS_DOCSTRING = r"""
537
+ Args:
538
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
539
+ Indices of input sequence tokens in the vocabulary.
540
+
541
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
542
+ [`PreTrainedTokenizer.encode`] for details.
543
+
544
+ [What are input IDs?](../glossary#input-ids)
545
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
546
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
547
+
548
+ - 1 for tokens that are **not masked**,
549
+ - 0 for tokens that are **masked**.
550
+
551
+ [What are attention masks?](../glossary#attention-mask)
552
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
553
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
554
+
555
+ - 1 indicates the head is **not masked**,
556
+ - 0 indicates the head is **masked**.
557
+
558
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
559
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
560
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
561
+ model's internal embedding lookup matrix.
562
+ output_attentions (`bool`, *optional*):
563
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
564
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
565
+ config will be used instead.
566
+ output_hidden_states (`bool`, *optional*):
567
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
568
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
569
+ used instead.
570
+ return_dict (`bool`, *optional*):
571
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
572
+ eager mode, in graph mode the value will always be set to True.
573
+ training (`bool`, *optional*, defaults to `False`):
574
+ Whether or not to use the model in training mode (some modules like dropout modules have different
575
+ behaviors between training and evaluation).
576
+ """
577
+
578
+
579
+ @add_start_docstrings(
580
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
581
+ DISTILBERT_START_DOCSTRING,
582
+ )
583
+ class TFDistilBertModel(TFDistilBertPreTrainedModel):
584
+ def __init__(self, config, *inputs, **kwargs):
585
+ super().__init__(config, *inputs, **kwargs)
586
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings
587
+
588
+ @unpack_inputs
589
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
590
+ @add_code_sample_docstrings(
591
+ checkpoint=_CHECKPOINT_FOR_DOC,
592
+ output_type=TFBaseModelOutput,
593
+ config_class=_CONFIG_FOR_DOC,
594
+ )
595
+ def call(
596
+ self,
597
+ input_ids: TFModelInputType | None = None,
598
+ attention_mask: np.ndarray | tf.Tensor | None = None,
599
+ head_mask: np.ndarray | tf.Tensor | None = None,
600
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
601
+ output_attentions: Optional[bool] = None,
602
+ output_hidden_states: Optional[bool] = None,
603
+ return_dict: Optional[bool] = None,
604
+ training: Optional[bool] = False,
605
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
606
+ outputs = self.distilbert(
607
+ input_ids=input_ids,
608
+ attention_mask=attention_mask,
609
+ head_mask=head_mask,
610
+ inputs_embeds=inputs_embeds,
611
+ output_attentions=output_attentions,
612
+ output_hidden_states=output_hidden_states,
613
+ return_dict=return_dict,
614
+ training=training,
615
+ )
616
+ return outputs
617
+
618
+ def build(self, input_shape=None):
619
+ if self.built:
620
+ return
621
+ self.built = True
622
+ if getattr(self, "distilbert", None) is not None:
623
+ with tf.name_scope(self.distilbert.name):
624
+ self.distilbert.build(None)
625
+
626
+
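# A minimal usage sketch for the TF model above, assuming TensorFlow is installed and
# the public "distilbert-base-uncased" checkpoint is used. The dictionary call is one
# of the three input formats described in DISTILBERT_START_DOCSTRING.
from transformers import AutoTokenizer, TFDistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(inputs)  # `inputs` is a dict of tensors
print(outputs.last_hidden_state.shape)  # (1, sequence_length, config.dim)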
627
+ class TFDistilBertLMHead(keras.layers.Layer):
628
+ def __init__(self, config, input_embeddings, **kwargs):
629
+ super().__init__(**kwargs)
630
+
631
+ self.config = config
632
+ self.dim = config.dim
633
+
634
+ # The output weights are the same as the input embeddings, but there is
635
+ # an output-only bias for each token.
636
+ self.input_embeddings = input_embeddings
637
+
638
+ def build(self, input_shape):
639
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
640
+
641
+ super().build(input_shape)
642
+
643
+ def get_output_embeddings(self):
644
+ return self.input_embeddings
645
+
646
+ def set_output_embeddings(self, value):
647
+ self.input_embeddings.weight = value
648
+ self.input_embeddings.vocab_size = shape_list(value)[0]
649
+
650
+ def get_bias(self):
651
+ return {"bias": self.bias}
652
+
653
+ def set_bias(self, value):
654
+ self.bias = value["bias"]
655
+ self.config.vocab_size = shape_list(value["bias"])[0]
656
+
657
+ def call(self, hidden_states):
658
+ seq_length = shape_list(tensor=hidden_states)[1]
659
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim])
660
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
661
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
662
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
663
+
664
+ return hidden_states
665
+
666
+
667
+ @add_start_docstrings(
668
+ """DistilBert Model with a `masked language modeling` head on top.""",
669
+ DISTILBERT_START_DOCSTRING,
670
+ )
671
+ class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
672
+ def __init__(self, config, *inputs, **kwargs):
673
+ super().__init__(config, *inputs, **kwargs)
674
+ self.config = config
675
+
676
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
677
+ self.vocab_transform = keras.layers.Dense(
678
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
679
+ )
680
+ self.act = get_tf_activation(config.activation)
681
+ self.vocab_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
682
+ self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
683
+
684
+ def get_lm_head(self):
685
+ return self.vocab_projector
686
+
687
+ def get_prefix_bias_name(self):
688
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
689
+ return self.name + "/" + self.vocab_projector.name
690
+
691
+ @unpack_inputs
692
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
693
+ @add_code_sample_docstrings(
694
+ checkpoint=_CHECKPOINT_FOR_DOC,
695
+ output_type=TFMaskedLMOutput,
696
+ config_class=_CONFIG_FOR_DOC,
697
+ )
698
+ def call(
699
+ self,
700
+ input_ids: TFModelInputType | None = None,
701
+ attention_mask: np.ndarray | tf.Tensor | None = None,
702
+ head_mask: np.ndarray | tf.Tensor | None = None,
703
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
704
+ output_attentions: Optional[bool] = None,
705
+ output_hidden_states: Optional[bool] = None,
706
+ return_dict: Optional[bool] = None,
707
+ labels: np.ndarray | tf.Tensor | None = None,
708
+ training: Optional[bool] = False,
709
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
710
+ r"""
711
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
712
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
713
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
714
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
715
+ """
716
+ distilbert_output = self.distilbert(
717
+ input_ids=input_ids,
718
+ attention_mask=attention_mask,
719
+ head_mask=head_mask,
720
+ inputs_embeds=inputs_embeds,
721
+ output_attentions=output_attentions,
722
+ output_hidden_states=output_hidden_states,
723
+ return_dict=return_dict,
724
+ training=training,
725
+ )
726
+ hidden_states = distilbert_output[0] # (bs, seq_length, dim)
727
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
728
+ prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
729
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
730
+ prediction_logits = self.vocab_projector(prediction_logits)
731
+
732
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)
733
+
734
+ if not return_dict:
735
+ output = (prediction_logits,) + distilbert_output[1:]
736
+ return ((loss,) + output) if loss is not None else output
737
+
738
+ return TFMaskedLMOutput(
739
+ loss=loss,
740
+ logits=prediction_logits,
741
+ hidden_states=distilbert_output.hidden_states,
742
+ attentions=distilbert_output.attentions,
743
+ )
744
+
745
+ def build(self, input_shape=None):
746
+ if self.built:
747
+ return
748
+ self.built = True
749
+ if getattr(self, "distilbert", None) is not None:
750
+ with tf.name_scope(self.distilbert.name):
751
+ self.distilbert.build(None)
752
+ if getattr(self, "vocab_transform", None) is not None:
753
+ with tf.name_scope(self.vocab_transform.name):
754
+ self.vocab_transform.build([None, None, self.config.dim])
755
+ if getattr(self, "vocab_layer_norm", None) is not None:
756
+ with tf.name_scope(self.vocab_layer_norm.name):
757
+ self.vocab_layer_norm.build([None, None, self.config.dim])
758
+ if getattr(self, "vocab_projector", None) is not None:
759
+ with tf.name_scope(self.vocab_projector.name):
760
+ self.vocab_projector.build(None)
761
+
762
+
763
+ @add_start_docstrings(
764
+ """
765
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
766
+ pooled output) e.g. for GLUE tasks.
767
+ """,
768
+ DISTILBERT_START_DOCSTRING,
769
+ )
770
+ class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
771
+ def __init__(self, config, *inputs, **kwargs):
772
+ super().__init__(config, *inputs, **kwargs)
773
+ self.num_labels = config.num_labels
774
+
775
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
776
+ self.pre_classifier = keras.layers.Dense(
777
+ config.dim,
778
+ kernel_initializer=get_initializer(config.initializer_range),
779
+ activation="relu",
780
+ name="pre_classifier",
781
+ )
782
+ self.classifier = keras.layers.Dense(
783
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
784
+ )
785
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
786
+ self.config = config
787
+
788
+ @unpack_inputs
789
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
790
+ @add_code_sample_docstrings(
791
+ checkpoint=_CHECKPOINT_FOR_DOC,
792
+ output_type=TFSequenceClassifierOutput,
793
+ config_class=_CONFIG_FOR_DOC,
794
+ )
795
+ def call(
796
+ self,
797
+ input_ids: TFModelInputType | None = None,
798
+ attention_mask: np.ndarray | tf.Tensor | None = None,
799
+ head_mask: np.ndarray | tf.Tensor | None = None,
800
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
801
+ output_attentions: Optional[bool] = None,
802
+ output_hidden_states: Optional[bool] = None,
803
+ return_dict: Optional[bool] = None,
804
+ labels: np.ndarray | tf.Tensor | None = None,
805
+ training: Optional[bool] = False,
806
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
807
+ r"""
808
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
809
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
810
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
811
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
812
+ """
813
+ distilbert_output = self.distilbert(
814
+ input_ids=input_ids,
815
+ attention_mask=attention_mask,
816
+ head_mask=head_mask,
817
+ inputs_embeds=inputs_embeds,
818
+ output_attentions=output_attentions,
819
+ output_hidden_states=output_hidden_states,
820
+ return_dict=return_dict,
821
+ training=training,
822
+ )
823
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
824
+ pooled_output = hidden_state[:, 0] # (bs, dim)
825
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
826
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
827
+ logits = self.classifier(pooled_output) # (bs, num_labels)
828
+
829
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
830
+
831
+ if not return_dict:
832
+ output = (logits,) + distilbert_output[1:]
833
+ return ((loss,) + output) if loss is not None else output
834
+
835
+ return TFSequenceClassifierOutput(
836
+ loss=loss,
837
+ logits=logits,
838
+ hidden_states=distilbert_output.hidden_states,
839
+ attentions=distilbert_output.attentions,
840
+ )
841
+
842
+ def build(self, input_shape=None):
843
+ if self.built:
844
+ return
845
+ self.built = True
846
+ if getattr(self, "distilbert", None) is not None:
847
+ with tf.name_scope(self.distilbert.name):
848
+ self.distilbert.build(None)
849
+ if getattr(self, "pre_classifier", None) is not None:
850
+ with tf.name_scope(self.pre_classifier.name):
851
+ self.pre_classifier.build([None, None, self.config.dim])
852
+ if getattr(self, "classifier", None) is not None:
853
+ with tf.name_scope(self.classifier.name):
854
+ self.classifier.build([None, None, self.config.dim])
855
+
856
+
857
+ @add_start_docstrings(
858
+ """
859
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
860
+ for Named-Entity-Recognition (NER) tasks.
861
+ """,
862
+ DISTILBERT_START_DOCSTRING,
863
+ )
864
+ class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
865
+ def __init__(self, config, *inputs, **kwargs):
866
+ super().__init__(config, *inputs, **kwargs)
867
+ self.num_labels = config.num_labels
868
+
869
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
870
+ self.dropout = keras.layers.Dropout(config.dropout)
871
+ self.classifier = keras.layers.Dense(
872
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
873
+ )
874
+ self.config = config
875
+
876
+ @unpack_inputs
877
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
878
+ @add_code_sample_docstrings(
879
+ checkpoint=_CHECKPOINT_FOR_DOC,
880
+ output_type=TFTokenClassifierOutput,
881
+ config_class=_CONFIG_FOR_DOC,
882
+ )
883
+ def call(
884
+ self,
885
+ input_ids: TFModelInputType | None = None,
886
+ attention_mask: np.ndarray | tf.Tensor | None = None,
887
+ head_mask: np.ndarray | tf.Tensor | None = None,
888
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
889
+ output_attentions: Optional[bool] = None,
890
+ output_hidden_states: Optional[bool] = None,
891
+ return_dict: Optional[bool] = None,
892
+ labels: np.ndarray | tf.Tensor | None = None,
893
+ training: Optional[bool] = False,
894
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
895
+ r"""
896
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
897
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
898
+ """
899
+ outputs = self.distilbert(
900
+ input_ids=input_ids,
901
+ attention_mask=attention_mask,
902
+ head_mask=head_mask,
903
+ inputs_embeds=inputs_embeds,
904
+ output_attentions=output_attentions,
905
+ output_hidden_states=output_hidden_states,
906
+ return_dict=return_dict,
907
+ training=training,
908
+ )
909
+ sequence_output = outputs[0]
910
+ sequence_output = self.dropout(sequence_output, training=training)
911
+ logits = self.classifier(sequence_output)
912
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
913
+
914
+ if not return_dict:
915
+ output = (logits,) + outputs[1:]
916
+ return ((loss,) + output) if loss is not None else output
917
+
918
+ return TFTokenClassifierOutput(
919
+ loss=loss,
920
+ logits=logits,
921
+ hidden_states=outputs.hidden_states,
922
+ attentions=outputs.attentions,
923
+ )
924
+
925
+ def build(self, input_shape=None):
926
+ if self.built:
927
+ return
928
+ self.built = True
929
+ if getattr(self, "distilbert", None) is not None:
930
+ with tf.name_scope(self.distilbert.name):
931
+ self.distilbert.build(None)
932
+ if getattr(self, "classifier", None) is not None:
933
+ with tf.name_scope(self.classifier.name):
934
+ self.classifier.build([None, None, self.config.hidden_size])
935
+
936
+
937
+ @add_start_docstrings(
938
+ """
939
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
940
+ a softmax) e.g. for RocStories/SWAG tasks.
941
+ """,
942
+ DISTILBERT_START_DOCSTRING,
943
+ )
944
+ class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
945
+ def __init__(self, config, *inputs, **kwargs):
946
+ super().__init__(config, *inputs, **kwargs)
947
+
948
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
949
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
950
+ self.pre_classifier = keras.layers.Dense(
951
+ config.dim,
952
+ kernel_initializer=get_initializer(config.initializer_range),
953
+ activation="relu",
954
+ name="pre_classifier",
955
+ )
956
+ self.classifier = keras.layers.Dense(
957
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
958
+ )
959
+ self.config = config
960
+
961
+ @unpack_inputs
962
+ @add_start_docstrings_to_model_forward(
963
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
964
+ )
965
+ @add_code_sample_docstrings(
966
+ checkpoint=_CHECKPOINT_FOR_DOC,
967
+ output_type=TFMultipleChoiceModelOutput,
968
+ config_class=_CONFIG_FOR_DOC,
969
+ )
970
+ def call(
971
+ self,
972
+ input_ids: TFModelInputType | None = None,
973
+ attention_mask: np.ndarray | tf.Tensor | None = None,
974
+ head_mask: np.ndarray | tf.Tensor | None = None,
975
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
976
+ output_attentions: Optional[bool] = None,
977
+ output_hidden_states: Optional[bool] = None,
978
+ return_dict: Optional[bool] = None,
979
+ labels: np.ndarray | tf.Tensor | None = None,
980
+ training: Optional[bool] = False,
981
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
982
+ r"""
983
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
984
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
985
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
986
+ """
987
+ if input_ids is not None:
988
+ num_choices = shape_list(input_ids)[1]
989
+ seq_length = shape_list(input_ids)[2]
990
+ else:
991
+ num_choices = shape_list(inputs_embeds)[1]
992
+ seq_length = shape_list(inputs_embeds)[2]
993
+
994
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
995
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
996
+ flat_inputs_embeds = (
997
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
998
+ if inputs_embeds is not None
999
+ else None
1000
+ )
1001
+ distilbert_output = self.distilbert(
1002
+ flat_input_ids,
1003
+ flat_attention_mask,
1004
+ head_mask,
1005
+ flat_inputs_embeds,
1006
+ output_attentions,
1007
+ output_hidden_states,
1008
+ return_dict=return_dict,
1009
+ training=training,
1010
+ )
1011
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
1012
+ pooled_output = hidden_state[:, 0] # (bs, dim)
1013
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
1014
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
1015
+ logits = self.classifier(pooled_output)
1016
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1017
+
1018
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1019
+
1020
+ if not return_dict:
1021
+ output = (reshaped_logits,) + distilbert_output[1:]
1022
+ return ((loss,) + output) if loss is not None else output
1023
+
1024
+ return TFMultipleChoiceModelOutput(
1025
+ loss=loss,
1026
+ logits=reshaped_logits,
1027
+ hidden_states=distilbert_output.hidden_states,
1028
+ attentions=distilbert_output.attentions,
1029
+ )
1030
+
1031
+ def build(self, input_shape=None):
1032
+ if self.built:
1033
+ return
1034
+ self.built = True
1035
+ if getattr(self, "distilbert", None) is not None:
1036
+ with tf.name_scope(self.distilbert.name):
1037
+ self.distilbert.build(None)
1038
+ if getattr(self, "pre_classifier", None) is not None:
1039
+ with tf.name_scope(self.pre_classifier.name):
1040
+ self.pre_classifier.build([None, None, self.config.dim])
1041
+ if getattr(self, "classifier", None) is not None:
1042
+ with tf.name_scope(self.classifier.name):
1043
+ self.classifier.build([None, None, self.config.dim])
1044
+
1045
+
1046
+ @add_start_docstrings(
1047
+ """
1048
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1049
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1050
+ """,
1051
+ DISTILBERT_START_DOCSTRING,
1052
+ )
1053
+ class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
1054
+ def __init__(self, config, *inputs, **kwargs):
1055
+ super().__init__(config, *inputs, **kwargs)
1056
+
1057
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
1058
+ self.qa_outputs = keras.layers.Dense(
1059
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1060
+ )
1061
+ assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
1062
+ self.dropout = keras.layers.Dropout(config.qa_dropout)
1063
+ self.config = config
1064
+
1065
+ @unpack_inputs
1066
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1067
+ @add_code_sample_docstrings(
1068
+ checkpoint=_CHECKPOINT_FOR_DOC,
1069
+ output_type=TFQuestionAnsweringModelOutput,
1070
+ config_class=_CONFIG_FOR_DOC,
1071
+ )
1072
+ def call(
1073
+ self,
1074
+ input_ids: TFModelInputType | None = None,
1075
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1076
+ head_mask: np.ndarray | tf.Tensor | None = None,
1077
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1078
+ output_attentions: Optional[bool] = None,
1079
+ output_hidden_states: Optional[bool] = None,
1080
+ return_dict: Optional[bool] = None,
1081
+ start_positions: np.ndarray | tf.Tensor | None = None,
1082
+ end_positions: np.ndarray | tf.Tensor | None = None,
1083
+ training: Optional[bool] = False,
1084
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1085
+ r"""
1086
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1087
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1088
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1089
+ are not taken into account for computing the loss.
1090
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1091
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1092
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1093
+ are not taken into account for computing the loss.
1094
+ """
1095
+ distilbert_output = self.distilbert(
1096
+ input_ids=input_ids,
1097
+ attention_mask=attention_mask,
1098
+ head_mask=head_mask,
1099
+ inputs_embeds=inputs_embeds,
1100
+ output_attentions=output_attentions,
1101
+ output_hidden_states=output_hidden_states,
1102
+ return_dict=return_dict,
1103
+ training=training,
1104
+ )
1105
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
1106
+ hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim)
1107
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
1108
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1109
+ start_logits = tf.squeeze(start_logits, axis=-1)
1110
+ end_logits = tf.squeeze(end_logits, axis=-1)
1111
+
1112
+ loss = None
1113
+ if start_positions is not None and end_positions is not None:
1114
+ labels = {"start_position": start_positions}
1115
+ labels["end_position"] = end_positions
1116
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1117
+
1118
+ if not return_dict:
1119
+ output = (start_logits, end_logits) + distilbert_output[1:]
1120
+ return ((loss,) + output) if loss is not None else output
1121
+
1122
+ return TFQuestionAnsweringModelOutput(
1123
+ loss=loss,
1124
+ start_logits=start_logits,
1125
+ end_logits=end_logits,
1126
+ hidden_states=distilbert_output.hidden_states,
1127
+ attentions=distilbert_output.attentions,
1128
+ )
1129
+
1130
+ def build(self, input_shape=None):
1131
+ if self.built:
1132
+ return
1133
+ self.built = True
1134
+ if getattr(self, "distilbert", None) is not None:
1135
+ with tf.name_scope(self.distilbert.name):
1136
+ self.distilbert.build(None)
1137
+ if getattr(self, "qa_outputs", None) is not None:
1138
+ with tf.name_scope(self.qa_outputs.name):
1139
+ self.qa_outputs.build([None, None, self.config.dim])
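The file above completes the set of TensorFlow task heads built on the shared `TFDistilBertMainLayer` (masked LM, sequence classification, token classification, multiple choice, question answering). A minimal, hedged usage sketch for the sequence-classification head; it assumes the public `distilbert-base-uncased` checkpoint, a working TensorFlow install, and an arbitrary two-label setup:

```python
from transformers import AutoTokenizer, TFDistilBertForSequenceClassification

# Checkpoint name and label count are illustrative; the classification head is freshly initialized.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

inputs = tokenizer("The movie was surprisingly good.", return_tensors="tf")
outputs = model(**inputs)       # TFSequenceClassifierOutput from the `call` defined above
print(outputs.logits.shape)     # (1, 2): one row of logits per sequence, one column per label
```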
venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py ADDED
@@ -0,0 +1,514 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DistilBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+
31
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
44
+ def whitespace_tokenize(text):
45
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
46
+ text = text.strip()
47
+ if not text:
48
+ return []
49
+ tokens = text.split()
50
+ return tokens
51
+
52
+
53
+ class DistilBertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct a DistilBERT tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original BERT).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+ model_input_names = ["input_ids", "attention_mask"]
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_file,
101
+ do_lower_case=True,
102
+ do_basic_tokenize=True,
103
+ never_split=None,
104
+ unk_token="[UNK]",
105
+ sep_token="[SEP]",
106
+ pad_token="[PAD]",
107
+ cls_token="[CLS]",
108
+ mask_token="[MASK]",
109
+ tokenize_chinese_chars=True,
110
+ strip_accents=None,
111
+ **kwargs,
112
+ ):
113
+ if not os.path.isfile(vocab_file):
114
+ raise ValueError(
115
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
116
+ " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
117
+ )
118
+ self.vocab = load_vocab(vocab_file)
119
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
120
+ self.do_basic_tokenize = do_basic_tokenize
121
+ if do_basic_tokenize:
122
+ self.basic_tokenizer = BasicTokenizer(
123
+ do_lower_case=do_lower_case,
124
+ never_split=never_split,
125
+ tokenize_chinese_chars=tokenize_chinese_chars,
126
+ strip_accents=strip_accents,
127
+ )
128
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
129
+
130
+ super().__init__(
131
+ do_lower_case=do_lower_case,
132
+ do_basic_tokenize=do_basic_tokenize,
133
+ never_split=never_split,
134
+ unk_token=unk_token,
135
+ sep_token=sep_token,
136
+ pad_token=pad_token,
137
+ cls_token=cls_token,
138
+ mask_token=mask_token,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ **kwargs,
142
+ )
143
+
144
+ @property
145
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
146
+ def do_lower_case(self):
147
+ return self.basic_tokenizer.do_lower_case
148
+
149
+ @property
150
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
151
+ def vocab_size(self):
152
+ return len(self.vocab)
153
+
154
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
155
+ def get_vocab(self):
156
+ return dict(self.vocab, **self.added_tokens_encoder)
157
+
158
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
159
+ def _tokenize(self, text, split_special_tokens=False):
160
+ split_tokens = []
161
+ if self.do_basic_tokenize:
162
+ for token in self.basic_tokenizer.tokenize(
163
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
164
+ ):
165
+ # If the token is part of the never_split set
166
+ if token in self.basic_tokenizer.never_split:
167
+ split_tokens.append(token)
168
+ else:
169
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
170
+ else:
171
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
172
+ return split_tokens
173
+
174
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
175
+ def _convert_token_to_id(self, token):
176
+ """Converts a token (str) in an id using the vocab."""
177
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
178
+
179
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
180
+ def _convert_id_to_token(self, index):
181
+ """Converts an index (integer) in a token (str) using the vocab."""
182
+ return self.ids_to_tokens.get(index, self.unk_token)
183
+
184
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
185
+ def convert_tokens_to_string(self, tokens):
186
+ """Converts a sequence of tokens (string) in a single string."""
187
+ out_string = " ".join(tokens).replace(" ##", "").strip()
188
+ return out_string
189
+
190
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
191
+ def build_inputs_with_special_tokens(
192
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
193
+ ) -> List[int]:
194
+ """
195
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
196
+ adding special tokens. A BERT sequence has the following format:
197
+
198
+ - single sequence: `[CLS] X [SEP]`
199
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
200
+
201
+ Args:
202
+ token_ids_0 (`List[int]`):
203
+ List of IDs to which the special tokens will be added.
204
+ token_ids_1 (`List[int]`, *optional*):
205
+ Optional second list of IDs for sequence pairs.
206
+
207
+ Returns:
208
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
209
+ """
210
+ if token_ids_1 is None:
211
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
212
+ cls = [self.cls_token_id]
213
+ sep = [self.sep_token_id]
214
+ return cls + token_ids_0 + sep + token_ids_1 + sep
215
+
216
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
217
+ def get_special_tokens_mask(
218
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
219
+ ) -> List[int]:
220
+ """
221
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
222
+ special tokens using the tokenizer `prepare_for_model` method.
223
+
224
+ Args:
225
+ token_ids_0 (`List[int]`):
226
+ List of IDs.
227
+ token_ids_1 (`List[int]`, *optional*):
228
+ Optional second list of IDs for sequence pairs.
229
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
230
+ Whether or not the token list is already formatted with special tokens for the model.
231
+
232
+ Returns:
233
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
234
+ """
235
+
236
+ if already_has_special_tokens:
237
+ return super().get_special_tokens_mask(
238
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
239
+ )
240
+
241
+ if token_ids_1 is not None:
242
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
243
+ return [1] + ([0] * len(token_ids_0)) + [1]
244
+
245
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
246
+ def create_token_type_ids_from_sequences(
247
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
248
+ ) -> List[int]:
249
+ """
250
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
251
+ pair mask has the following format:
252
+
253
+ ```
254
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
255
+ | first sequence | second sequence |
256
+ ```
257
+
258
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
259
+
260
+ Args:
261
+ token_ids_0 (`List[int]`):
262
+ List of IDs.
263
+ token_ids_1 (`List[int]`, *optional*):
264
+ Optional second list of IDs for sequence pairs.
265
+
266
+ Returns:
267
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
268
+ """
269
+ sep = [self.sep_token_id]
270
+ cls = [self.cls_token_id]
271
+ if token_ids_1 is None:
272
+ return len(cls + token_ids_0 + sep) * [0]
273
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
274
+
275
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
276
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
277
+ index = 0
278
+ if os.path.isdir(save_directory):
279
+ vocab_file = os.path.join(
280
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
281
+ )
282
+ else:
283
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
284
+ with open(vocab_file, "w", encoding="utf-8") as writer:
285
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
286
+ if index != token_index:
287
+ logger.warning(
288
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
289
+ " Please check that the vocabulary is not corrupted!"
290
+ )
291
+ index = token_index
292
+ writer.write(token + "\n")
293
+ index += 1
294
+ return (vocab_file,)
295
+
296
+
297
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
298
+ class BasicTokenizer(object):
299
+ """
300
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
301
+
302
+ Args:
303
+ do_lower_case (`bool`, *optional*, defaults to `True`):
304
+ Whether or not to lowercase the input when tokenizing.
305
+ never_split (`Iterable`, *optional*):
306
+ Collection of tokens which will never be split during tokenization. Only has an effect when
307
+ `do_basic_tokenize=True`
308
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
309
+ Whether or not to tokenize Chinese characters.
310
+
311
+ This should likely be deactivated for Japanese (see this
312
+ [issue](https://github.com/huggingface/transformers/issues/328)).
313
+ strip_accents (`bool`, *optional*):
314
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
315
+ value for `lowercase` (as in the original BERT).
316
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
317
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
318
+ the full context of the words, such as contractions.
319
+ """
320
+
321
+ def __init__(
322
+ self,
323
+ do_lower_case=True,
324
+ never_split=None,
325
+ tokenize_chinese_chars=True,
326
+ strip_accents=None,
327
+ do_split_on_punc=True,
328
+ ):
329
+ if never_split is None:
330
+ never_split = []
331
+ self.do_lower_case = do_lower_case
332
+ self.never_split = set(never_split)
333
+ self.tokenize_chinese_chars = tokenize_chinese_chars
334
+ self.strip_accents = strip_accents
335
+ self.do_split_on_punc = do_split_on_punc
336
+
337
+ def tokenize(self, text, never_split=None):
338
+ """
339
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
340
+
341
+ Args:
342
+ never_split (`List[str]`, *optional*)
343
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
344
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
345
+ """
346
+ # union() returns a new set by concatenating the two sets.
347
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
348
+ text = self._clean_text(text)
349
+
350
+ # This was added on November 1st, 2018 for the multilingual and Chinese
351
+ # models. This is also applied to the English models now, but it doesn't
352
+ # matter since the English models were not trained on any Chinese data
353
+ # and generally don't have any Chinese data in them (there are Chinese
354
+ # characters in the vocabulary because Wikipedia does have some Chinese
355
+ # words in the English Wikipedia.).
356
+ if self.tokenize_chinese_chars:
357
+ text = self._tokenize_chinese_chars(text)
358
+ # prevents treating the same character with different unicode codepoints as different characters
359
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
360
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
361
+ split_tokens = []
362
+ for token in orig_tokens:
363
+ if token not in never_split:
364
+ if self.do_lower_case:
365
+ token = token.lower()
366
+ if self.strip_accents is not False:
367
+ token = self._run_strip_accents(token)
368
+ elif self.strip_accents:
369
+ token = self._run_strip_accents(token)
370
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
371
+
372
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
373
+ return output_tokens
374
+
375
+ def _run_strip_accents(self, text):
376
+ """Strips accents from a piece of text."""
377
+ text = unicodedata.normalize("NFD", text)
378
+ output = []
379
+ for char in text:
380
+ cat = unicodedata.category(char)
381
+ if cat == "Mn":
382
+ continue
383
+ output.append(char)
384
+ return "".join(output)
385
+
386
+ def _run_split_on_punc(self, text, never_split=None):
387
+ """Splits punctuation on a piece of text."""
388
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
389
+ return [text]
390
+ chars = list(text)
391
+ i = 0
392
+ start_new_word = True
393
+ output = []
394
+ while i < len(chars):
395
+ char = chars[i]
396
+ if _is_punctuation(char):
397
+ output.append([char])
398
+ start_new_word = True
399
+ else:
400
+ if start_new_word:
401
+ output.append([])
402
+ start_new_word = False
403
+ output[-1].append(char)
404
+ i += 1
405
+
406
+ return ["".join(x) for x in output]
407
+
408
+ def _tokenize_chinese_chars(self, text):
409
+ """Adds whitespace around any CJK character."""
410
+ output = []
411
+ for char in text:
412
+ cp = ord(char)
413
+ if self._is_chinese_char(cp):
414
+ output.append(" ")
415
+ output.append(char)
416
+ output.append(" ")
417
+ else:
418
+ output.append(char)
419
+ return "".join(output)
420
+
421
+ def _is_chinese_char(self, cp):
422
+ """Checks whether CP is the codepoint of a CJK character."""
423
+ # This defines a "chinese character" as anything in the CJK Unicode block:
424
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
425
+ #
426
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
427
+ # despite its name. The modern Korean Hangul alphabet is a different block,
428
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
429
+ # space-separated words, so they are not treated specially and handled
430
+ # like all of the other languages.
431
+ if (
432
+ (cp >= 0x4E00 and cp <= 0x9FFF)
433
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
434
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
435
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
436
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
437
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
438
+ or (cp >= 0xF900 and cp <= 0xFAFF)
439
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
440
+ ): #
441
+ return True
442
+
443
+ return False
444
+
445
+ def _clean_text(self, text):
446
+ """Performs invalid character removal and whitespace cleanup on text."""
447
+ output = []
448
+ for char in text:
449
+ cp = ord(char)
450
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
451
+ continue
452
+ if _is_whitespace(char):
453
+ output.append(" ")
454
+ else:
455
+ output.append(char)
456
+ return "".join(output)
457
+
458
+
459
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
460
+ class WordpieceTokenizer(object):
461
+ """Runs WordPiece tokenization."""
462
+
463
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
464
+ self.vocab = vocab
465
+ self.unk_token = unk_token
466
+ self.max_input_chars_per_word = max_input_chars_per_word
467
+
468
+ def tokenize(self, text):
469
+ """
470
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
471
+ tokenization using the given vocabulary.
472
+
473
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
474
+
475
+ Args:
476
+ text: A single token or whitespace separated tokens. This should have
477
+ already been passed through *BasicTokenizer*.
478
+
479
+ Returns:
480
+ A list of wordpiece tokens.
481
+ """
482
+
483
+ output_tokens = []
484
+ for token in whitespace_tokenize(text):
485
+ chars = list(token)
486
+ if len(chars) > self.max_input_chars_per_word:
487
+ output_tokens.append(self.unk_token)
488
+ continue
489
+
490
+ is_bad = False
491
+ start = 0
492
+ sub_tokens = []
493
+ while start < len(chars):
494
+ end = len(chars)
495
+ cur_substr = None
496
+ while start < end:
497
+ substr = "".join(chars[start:end])
498
+ if start > 0:
499
+ substr = "##" + substr
500
+ if substr in self.vocab:
501
+ cur_substr = substr
502
+ break
503
+ end -= 1
504
+ if cur_substr is None:
505
+ is_bad = True
506
+ break
507
+ sub_tokens.append(cur_substr)
508
+ start = end
509
+
510
+ if is_bad:
511
+ output_tokens.append(self.unk_token)
512
+ else:
513
+ output_tokens.extend(sub_tokens)
514
+ return output_tokens
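A short sketch of the slow tokenizer defined above, showing the BasicTokenizer → WordPiece pipeline and the `[CLS]`/`[SEP]` wrapping from `build_inputs_with_special_tokens`. The exact sub-word splits depend on the checkpoint's `vocab.txt`, so the splits in the comments are indicative only:

```python
from transformers import DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

# Lower-casing and punctuation splitting happen in BasicTokenizer, then greedy
# longest-match-first WordPiece; e.g. something like ['una', '##ffa', '##ble', '!']
print(tokenizer.tokenize("Unaffable!"))

ids = tokenizer.encode("unaffable")            # wraps the word pieces in [CLS] ... [SEP]
print(tokenizer.convert_ids_to_tokens(ids))    # ['[CLS]', ..., '[SEP]']
```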
venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py ADDED
@@ -0,0 +1,176 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DistilBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_distilbert import DistilBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+
32
+ class DistilBertTokenizerFast(PreTrainedTokenizerFast):
33
+ r"""
34
+ Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
35
+
36
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
37
+ refer to this superclass for more information regarding those methods.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ File containing the vocabulary.
42
+ do_lower_case (`bool`, *optional*, defaults to `True`):
43
+ Whether or not to lowercase the input when tokenizing.
44
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
45
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
46
+ token instead.
47
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
48
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
49
+ sequence classification or for a text and a question for question answering. It is also used as the last
50
+ token of a sequence built with special tokens.
51
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
52
+ The token used for padding, for example when batching sequences of different lengths.
53
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
54
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
55
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
56
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
57
+ The token used for masking values. This is the token used when training this model with masked language
58
+ modeling. This is the token which the model will try to predict.
59
+ clean_text (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
61
+ whitespace characters with a plain space.
62
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
64
+ issue](https://github.com/huggingface/transformers/issues/328)).
65
+ strip_accents (`bool`, *optional*):
66
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
67
+ value for `lowercase` (as in the original BERT).
68
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
69
+ The prefix for subwords.
70
+ """
71
+
72
+ vocab_files_names = VOCAB_FILES_NAMES
73
+ model_input_names = ["input_ids", "attention_mask"]
74
+ slow_tokenizer_class = DistilBertTokenizer
75
+
76
+ def __init__(
77
+ self,
78
+ vocab_file=None,
79
+ tokenizer_file=None,
80
+ do_lower_case=True,
81
+ unk_token="[UNK]",
82
+ sep_token="[SEP]",
83
+ pad_token="[PAD]",
84
+ cls_token="[CLS]",
85
+ mask_token="[MASK]",
86
+ tokenize_chinese_chars=True,
87
+ strip_accents=None,
88
+ **kwargs,
89
+ ):
90
+ super().__init__(
91
+ vocab_file,
92
+ tokenizer_file=tokenizer_file,
93
+ do_lower_case=do_lower_case,
94
+ unk_token=unk_token,
95
+ sep_token=sep_token,
96
+ pad_token=pad_token,
97
+ cls_token=cls_token,
98
+ mask_token=mask_token,
99
+ tokenize_chinese_chars=tokenize_chinese_chars,
100
+ strip_accents=strip_accents,
101
+ **kwargs,
102
+ )
103
+
104
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
105
+ if (
106
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
107
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
108
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
109
+ ):
110
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
111
+ normalizer_state["lowercase"] = do_lower_case
112
+ normalizer_state["strip_accents"] = strip_accents
113
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
114
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
115
+
116
+ self.do_lower_case = do_lower_case
117
+
118
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
119
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
120
+ """
121
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
122
+ adding special tokens. A BERT sequence has the following format:
123
+
124
+ - single sequence: `[CLS] X [SEP]`
125
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
126
+
127
+ Args:
128
+ token_ids_0 (`List[int]`):
129
+ List of IDs to which the special tokens will be added.
130
+ token_ids_1 (`List[int]`, *optional*):
131
+ Optional second list of IDs for sequence pairs.
132
+
133
+ Returns:
134
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
135
+ """
136
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
137
+
138
+ if token_ids_1 is not None:
139
+ output += token_ids_1 + [self.sep_token_id]
140
+
141
+ return output
142
+
143
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
144
+ def create_token_type_ids_from_sequences(
145
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
146
+ ) -> List[int]:
147
+ """
148
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
149
+ pair mask has the following format:
150
+
151
+ ```
152
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
153
+ | first sequence | second sequence |
154
+ ```
155
+
156
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
157
+
158
+ Args:
159
+ token_ids_0 (`List[int]`):
160
+ List of IDs.
161
+ token_ids_1 (`List[int]`, *optional*):
162
+ Optional second list of IDs for sequence pairs.
163
+
164
+ Returns:
165
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
166
+ """
167
+ sep = [self.sep_token_id]
168
+ cls = [self.cls_token_id]
169
+ if token_ids_1 is None:
170
+ return len(cls + token_ids_0 + sep) * [0]
171
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
172
+
173
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
174
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
175
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
176
+ return tuple(files)
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
35
+
36
+ try:
37
+ if not is_tf_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
43
+
44
+ try:
45
+ if not is_flax_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
51
+
52
+ if TYPE_CHECKING:
53
+ from .configuration_encoder_decoder import EncoderDecoderConfig
54
+
55
+ try:
56
+ if not is_torch_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .modeling_encoder_decoder import EncoderDecoderModel
62
+
63
+ try:
64
+ if not is_tf_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
70
+
71
+ try:
72
+ if not is_flax_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
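Like the other model packages, this `__init__.py` only registers an import structure and then swaps the module for a `_LazyModule`, so torch/TF/Flax are imported only when the corresponding class is first accessed. A small sketch of the resulting behaviour, assuming PyTorch is installed:

```python
import transformers.models.encoder_decoder as encoder_decoder

# Resolved lazily by _LazyModule on first attribute access; only the config module is imported.
config_cls = encoder_decoder.EncoderDecoderConfig

# This access is what finally pulls in the torch-backed modeling module.
model_cls = encoder_decoder.EncoderDecoderModel
print(model_cls.__module__)  # transformers.models.encoder_decoder.modeling_encoder_decoder
```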
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc ADDED
Binary file (3.94 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc ADDED
Binary file (31.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc ADDED
Binary file (25.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py ADDED
@@ -0,0 +1,106 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class EncoderDecoderConfig(PretrainedConfig):
26
+ r"""
27
+ [`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is
28
+ used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
29
+ configs.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+ Args:
35
+ kwargs (*optional*):
36
+ Dictionary of keyword arguments. Notably:
37
+
38
+ - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
39
+ the encoder config.
40
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
41
+ the decoder config.
42
+
43
+ Examples:
44
+
45
+ ```python
46
+ >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
47
+
48
+ >>> # Initializing a BERT google-bert/bert-base-uncased style configuration
49
+ >>> config_encoder = BertConfig()
50
+ >>> config_decoder = BertConfig()
51
+
52
+ >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
53
+
54
+ >>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
55
+ >>> model = EncoderDecoderModel(config=config)
56
+
57
+ >>> # Accessing the model configuration
58
+ >>> config_encoder = model.config.encoder
59
+ >>> config_decoder = model.config.decoder
60
+ >>> # set decoder config to causal lm
61
+ >>> config_decoder.is_decoder = True
62
+ >>> config_decoder.add_cross_attention = True
63
+
64
+ >>> # Saving the model, including its configuration
65
+ >>> model.save_pretrained("my-model")
66
+
67
+ >>> # loading model and config from pretrained folder
68
+ >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
69
+ >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
70
+ ```"""
71
+
72
+ model_type = "encoder-decoder"
73
+ is_composition = True
74
+
75
+ def __init__(self, **kwargs):
76
+ super().__init__(**kwargs)
77
+ assert (
78
+ "encoder" in kwargs and "decoder" in kwargs
79
+ ), "Config has to be initialized with encoder and decoder config"
80
+ encoder_config = kwargs.pop("encoder")
81
+ encoder_model_type = encoder_config.pop("model_type")
82
+ decoder_config = kwargs.pop("decoder")
83
+ decoder_model_type = decoder_config.pop("model_type")
84
+
85
+ from ..auto.configuration_auto import AutoConfig
86
+
87
+ self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
88
+ self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
89
+ self.is_encoder_decoder = True
90
+
91
+ @classmethod
92
+ def from_encoder_decoder_configs(
93
+ cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
94
+ ) -> PretrainedConfig:
95
+ r"""
96
+ Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
97
+ decoder model configuration.
98
+
99
+ Returns:
100
+ [`EncoderDecoderConfig`]: An instance of a configuration object
101
+ """
102
+ logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
103
+ decoder_config.is_decoder = True
104
+ decoder_config.add_cross_attention = True
105
+
106
+ return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
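Beyond the docstring example, `from_encoder_decoder_configs` above also flips the decoder sub-config into decoder mode with cross-attention before composing, and the composed config serializes both sub-configs as nested dicts. A brief sketch (the BERT layer counts are arbitrary, just to keep the configs small):

```python
from transformers import BertConfig, EncoderDecoderConfig

enc = BertConfig(num_hidden_layers=2)
dec = BertConfig(num_hidden_layers=2)
config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)

assert config.decoder.is_decoder and config.decoder.add_cross_attention
assert config.is_encoder_decoder                 # set in __init__ above

serialized = config.to_dict()                    # nests full "encoder" and "decoder" dicts
print(serialized["decoder"]["is_decoder"])       # True
```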
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py ADDED
@@ -0,0 +1,693 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Classes to support Encoder-Decoder architectures"""
16
+
17
+
18
+ import gc
19
+ import inspect
20
+ import os
21
+ import tempfile
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import torch
26
+ from torch import nn
27
+ from torch.nn import CrossEntropyLoss
28
+
29
+ from ...configuration_utils import PretrainedConfig
30
+ from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
33
+ from ..auto.configuration_auto import AutoConfig
34
+ from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
35
+ from .configuration_encoder_decoder import EncoderDecoderConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "EncoderDecoderConfig"
41
+
42
+ DEPRECATION_WARNING = (
43
+ "Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
44
+ " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
45
+ " fine-tuning a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the"
46
+ " labels, no need to pass them yourself anymore."
47
+ )
48
+
49
+ ENCODER_DECODER_START_DOCSTRING = r"""
50
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
51
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
52
+ [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
53
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
54
+ generative task, like summarization.
55
+
56
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
57
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
58
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei
59
+ Severyn.
60
+
61
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model
62
+ (see the examples for more information).
63
+
64
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
65
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
66
+ etc.)
67
+
68
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
69
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
70
+ and behavior.
71
+
72
+ Parameters:
73
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
74
+ Initializing with a config file does not load the weights associated with the model, only the
75
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
76
+ """
77
+
78
+ ENCODER_DECODER_INPUTS_DOCSTRING = r"""
79
+ Args:
80
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
81
+ Indices of input sequence tokens in the vocabulary.
82
+
83
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
84
+ [`PreTrainedTokenizer.__call__`] for details.
85
+
86
+ [What are input IDs?](../glossary#input-ids)
87
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
88
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
89
+
90
+ - 1 for tokens that are **not masked**,
91
+ - 0 for tokens that are **masked**.
92
+
93
+ [What are attention masks?](../glossary#attention-mask)
94
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
95
+ Indices of decoder input sequence tokens in the vocabulary.
96
+
97
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
98
+ [`PreTrainedTokenizer.__call__`] for details.
99
+
100
+ [What are input IDs?](../glossary#input-ids)
101
+
102
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
103
+ `past_key_values`).
104
+
105
+ For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
106
+ right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
107
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
109
+ be used by default.
110
+ encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
111
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
112
+ `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
113
+ of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
114
+ decoder.
115
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
116
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
117
+
118
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
119
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
120
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
121
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
122
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
123
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
124
+ model's internal embedding lookup matrix.
125
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
126
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
127
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
128
+ into associated vectors than the model's internal embedding lookup matrix.
129
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
130
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
131
+ ..., config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
132
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
133
+ use_cache (`bool`, *optional*):
134
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
135
+ `past_key_values`).
136
+ output_attentions (`bool`, *optional*):
137
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
138
+ tensors for more detail.
139
+ output_hidden_states (`bool`, *optional*):
140
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
141
+ more detail.
142
+ return_dict (`bool`, *optional*):
143
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
144
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
145
+
146
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
147
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
148
+ """
149
+
150
+
151
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
152
+ """
153
+ Shift input ids one token to the right.
154
+ """
155
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
156
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
157
+ if decoder_start_token_id is None:
158
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
159
+ shifted_input_ids[:, 0] = decoder_start_token_id
160
+
161
+ if pad_token_id is None:
162
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
163
+ # replace possible -100 values in labels by `pad_token_id`
164
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
165
+
166
+ return shifted_input_ids
167
+
168
+
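# Illustrative sketch of the helper above (hypothetical values, not part of the committed file):
#
#   labels = torch.tensor([[5, 6, -100, -100]])
#   shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=101)
#   # -> tensor([[101, 5, 6, 0]]): tokens move one position to the right, the start token is
#   # prepended, and any remaining -100 placeholders are replaced by pad_token_id.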
169
+ @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
170
+ class EncoderDecoderModel(PreTrainedModel):
171
+ r"""
172
+ [`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
173
+ of the base model classes of the library as encoder and another one as decoder when created with the
174
+ [`~AutoModel.from_pretrained`] class method for the encoder and the
175
+ [`~AutoModelForCausalLM.from_pretrained`] class method for the decoder.
176
+ """
177
+
178
+ config_class = EncoderDecoderConfig
179
+ base_model_prefix = "encoder_decoder"
180
+ main_input_name = "input_ids"
181
+ supports_gradient_checkpointing = True
182
+
183
+ def __init__(
184
+ self,
185
+ config: Optional[PretrainedConfig] = None,
186
+ encoder: Optional[PreTrainedModel] = None,
187
+ decoder: Optional[PreTrainedModel] = None,
188
+ ):
189
+ if config is None and (encoder is None or decoder is None):
190
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
191
+ if config is None:
192
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
193
+ else:
194
+ if not isinstance(config, self.config_class):
195
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
196
+
197
+ if config.decoder.cross_attention_hidden_size is not None:
198
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
199
+ raise ValueError(
200
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
201
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
202
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
203
+ " `config.encoder.hidden_size`."
204
+ )
205
+
206
+ # initialize with config
207
+ super().__init__(config)
208
+
209
+ if encoder is None:
210
+ from ..auto.modeling_auto import AutoModel
211
+
212
+ encoder = AutoModel.from_config(config.encoder)
213
+
214
+ if decoder is None:
215
+ from ..auto.modeling_auto import AutoModelForCausalLM
216
+
217
+ decoder = AutoModelForCausalLM.from_config(config.decoder)
218
+
219
+ self.encoder = encoder
220
+ self.decoder = decoder
221
+
222
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
223
+ logger.warning(
224
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
225
+ f" {self.config.encoder}"
226
+ )
227
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
228
+ logger.warning(
229
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
230
+ f" {self.config.decoder}"
231
+ )
232
+
233
+ # make sure that the individual model's config refers to the shared config
234
+ # so that the updates to the config will be synced
235
+ self.encoder.config = self.config.encoder
236
+ self.decoder.config = self.config.decoder
237
+
238
+ # encoder outputs might need to be projected to different dimension for decoder
239
+ if (
240
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
241
+ and self.decoder.config.cross_attention_hidden_size is None
242
+ ):
243
+ self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
244
+
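# Illustrative note (assumed sizes, not taken from the diff): an encoder with hidden_size=768
# paired with a decoder with hidden_size=1024 and no `cross_attention_hidden_size` would get
# enc_to_dec_proj = nn.Linear(768, 1024), so encoder states match the decoder's cross-attention width.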
245
+ if self.encoder.get_output_embeddings() is not None:
246
+ raise ValueError(
247
+ f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
248
+ )
249
+
250
+ decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
251
+ if "encoder_hidden_states" not in decoder_signature:
252
+ raise ValueError(
253
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
254
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
255
+ )
256
+
257
+ # tie encoder, decoder weights if config set accordingly
258
+ self.tie_weights()
259
+
260
+ def tie_weights(self):
261
+ # tie encoder & decoder if needed
262
+ if self.config.tie_encoder_decoder:
263
+ # tie encoder and decoder base model
264
+ decoder_base_model_prefix = self.decoder.base_model_prefix
265
+ tied_weights = self._tie_encoder_decoder_weights(
266
+ self.encoder,
267
+ self.decoder._modules[decoder_base_model_prefix],
268
+ self.decoder.base_model_prefix,
269
+ "encoder",
270
+ )
271
+ # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class
272
+ # attribute, not an instance member; modifying it would modify the entire class,
273
+ # leading to issues on subsequent calls (e.g. by different tests).
274
+ self._dynamic_tied_weights_keys = tied_weights
275
+
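# Illustrative note (hedged): the tying above only runs when `config.tie_encoder_decoder` is True,
# which is mainly useful when encoder and decoder share the same architecture and checkpoint
# (e.g. a shared bert2bert); otherwise tie_weights() leaves both sub-models untouched.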
276
+ def get_encoder(self):
277
+ return self.encoder
278
+
279
+ def get_decoder(self):
280
+ return self.decoder
281
+
282
+ def get_input_embeddings(self):
283
+ return self.encoder.get_input_embeddings()
284
+
285
+ def get_output_embeddings(self):
286
+ return self.decoder.get_output_embeddings()
287
+
288
+ def set_output_embeddings(self, new_embeddings):
289
+ return self.decoder.set_output_embeddings(new_embeddings)
290
+
291
+ @classmethod
292
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
293
+ r"""
294
+ Example:
295
+
296
+ ```python
297
+ >>> from transformers import EncoderDecoderModel
298
+
299
+ >>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
300
+ ```"""
301
+
302
+ from_tf = kwargs.pop("from_tf", False)
303
+ if from_tf:
304
+ from transformers import TFEncoderDecoderModel
305
+
306
+ # a workaround to load from tensorflow checkpoint
307
+ # Using `_tf_model` won't work, because the weight names in the encoder/decoder of `_tf_model` get
308
+ # extended before saving those components. For example, the name of `_tf_model.encoder.vit` is
309
+ # `[top model name]/encoder/vit`, but the name of `tf_model.encoder.vit` is `[top model name]/vit`. The
310
+ # [top model name] is handled (stripped) by the conversion method, and the former case gets extra `encoder`,
311
+ # which should not occur when we want to save the components alone.
312
+ # There was a (very) ugly potential fix, which wasn't integrated to `transformers`: see
313
+ # https://github.com/huggingface/transformers/pull/13222/commits/dbb3c9de76eee235791d2064094654637c99f36d#r697304245
314
+ # (the change in `src/transformers/modeling_tf_utils.py`)
315
+ _tf_model = TFEncoderDecoderModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
316
+ config = _tf_model.config
317
+
318
+ # Using `tf_model` instead
319
+ encoder = _tf_model.encoder.__class__(_tf_model.config.encoder)
320
+ decoder = _tf_model.decoder.__class__(_tf_model.config.decoder)
321
+ # Make sure models are built
322
+ encoder(encoder.dummy_inputs)
323
+ decoder(decoder.dummy_inputs)
324
+
325
+ # Get the variable correspondence between `_tf_model` and `encoder` and `decoder`
326
+ encoder_variables = {}
327
+ for v in encoder.trainable_variables + encoder.non_trainable_variables:
328
+ encoder_variables["/".join(v.name.split("/")[1:])] = v
329
+ decoder_variables = {}
330
+ for v in decoder.trainable_variables + decoder.non_trainable_variables:
331
+ decoder_variables["/".join(v.name.split("/")[1:])] = v
332
+
333
+ _encoder_variables = {}
334
+ for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables:
335
+ _encoder_variables["/".join(v.name.split("/")[2:])] = v
336
+ _decoder_variables = {}
337
+ for v in _tf_model.decoder.trainable_variables + _tf_model.decoder.non_trainable_variables:
338
+ _decoder_variables["/".join(v.name.split("/")[2:])] = v
339
+
340
+ # assign weight values to `encoder` and `decoder` from `_tf_model`
341
+ for name, v in encoder_variables.items():
342
+ v.assign(_encoder_variables[name])
343
+ for name, v in decoder_variables.items():
344
+ v.assign(_decoder_variables[name])
345
+
346
+ tf_model = TFEncoderDecoderModel(encoder=encoder, decoder=decoder)
347
+
348
+ # Deal with `enc_to_dec_proj`
349
+ if hasattr(_tf_model, "enc_to_dec_proj"):
350
+ tf_model(tf_model.dummy_inputs)
351
+ tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel)
352
+ tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias)
353
+
354
+ with tempfile.TemporaryDirectory() as tmpdirname:
355
+ encoder_dir = os.path.join(tmpdirname, "encoder")
356
+ decoder_dir = os.path.join(tmpdirname, "decoder")
357
+ tf_model.encoder.save_pretrained(encoder_dir)
358
+ tf_model.decoder.save_pretrained(decoder_dir)
359
+
360
+ if hasattr(tf_model, "enc_to_dec_proj"):
361
+ enc_to_dec_proj_weight = torch.transpose(
362
+ torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0
363
+ )
364
+ enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy())
365
+
366
+ del _tf_model
367
+ del tf_model
368
+ gc.collect()
369
+
370
+ model = EncoderDecoderModel.from_encoder_decoder_pretrained(
371
+ encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True
372
+ )
373
+ # This is only for copying some specific attributes of this particular model.
374
+ model.config = config
375
+
376
+ if hasattr(model, "enc_to_dec_proj"):
377
+ model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous()
378
+ model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous()
379
+
380
+ return model
381
+
382
+ # At the moment fast initialization is not supported for composite models
383
+ if kwargs.get("_fast_init", False):
384
+ logger.warning(
385
+ "Fast initialization is currently not supported for EncoderDecoderModel. "
386
+ "Falling back to slow initialization..."
387
+ )
388
+ kwargs["_fast_init"] = False
389
+
390
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
391
+
392
+ @classmethod
393
+ def from_encoder_decoder_pretrained(
394
+ cls,
395
+ encoder_pretrained_model_name_or_path: str = None,
396
+ decoder_pretrained_model_name_or_path: str = None,
397
+ *model_args,
398
+ **kwargs,
399
+ ) -> PreTrainedModel:
400
+ r"""
401
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
402
+ checkpoints.
403
+
404
+
405
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
406
+ the model, you need to first set it back in training mode with `model.train()`.
407
+
408
+ Params:
409
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
410
+ Information necessary to initiate the encoder. Can be either:
411
+
412
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
413
+ - A path to a *directory* containing model weights saved using
414
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
415
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
416
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
417
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
418
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
419
+
420
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
421
+ Information necessary to initiate the decoder. Can be either:
422
+
423
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
424
+ - A path to a *directory* containing model weights saved using
425
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
426
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
427
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
428
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
429
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
430
+
431
+ model_args (remaining positional arguments, *optional*):
432
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
433
+
434
+ kwargs (remaining dictionary of keyword arguments, *optional*):
435
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
436
+ `output_attentions=True`).
437
+
438
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
439
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
440
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
441
+
442
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
443
+
444
+ Example:
445
+
446
+ ```python
447
+ >>> from transformers import EncoderDecoderModel
448
+
449
+ >>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
450
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
451
+ >>> # saving model after fine-tuning
452
+ >>> model.save_pretrained("./bert2bert")
453
+ >>> # load fine-tuned model
454
+ >>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
455
+ ```"""
456
+
457
+ kwargs_encoder = {
458
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
459
+ }
460
+
461
+ kwargs_decoder = {
462
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
463
+ }
464
+
465
+ # remove encoder, decoder kwargs from kwargs
466
+ for key in kwargs_encoder.keys():
467
+ del kwargs["encoder_" + key]
468
+ for key in kwargs_decoder.keys():
469
+ del kwargs["decoder_" + key]
470
+
471
+ # Load and initialize the encoder and decoder
472
+ # The distinction between encoder and decoder at the model level is made
473
+ # by the value of the flag `is_decoder` that we need to set correctly.
474
+ encoder = kwargs_encoder.pop("model", None)
475
+ if encoder is None:
476
+ if encoder_pretrained_model_name_or_path is None:
477
+ raise ValueError(
478
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
479
+ "to be defined."
480
+ )
481
+
482
+ if "config" not in kwargs_encoder:
483
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
484
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
485
+ )
486
+
487
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
488
+ logger.info(
489
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
490
+ "from a decoder model. Cross-attention and casual mask are disabled."
491
+ )
492
+ encoder_config.is_decoder = False
493
+ encoder_config.add_cross_attention = False
494
+
495
+ kwargs_encoder["config"] = encoder_config
496
+
497
+ encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
498
+
499
+ decoder = kwargs_decoder.pop("model", None)
500
+ if decoder is None:
501
+ if decoder_pretrained_model_name_or_path is None:
502
+ raise ValueError(
503
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
504
+ "to be defined."
505
+ )
506
+
507
+ if "config" not in kwargs_decoder:
508
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
509
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
510
+ )
511
+
512
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
513
+ logger.info(
514
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
515
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
516
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
517
+ )
518
+ decoder_config.is_decoder = True
519
+ decoder_config.add_cross_attention = True
520
+
521
+ kwargs_decoder["config"] = decoder_config
522
+
523
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
524
+ logger.warning(
525
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
526
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
527
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
528
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
529
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
530
+ )
531
+
532
+ decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
533
+
534
+ # instantiate config with corresponding kwargs
535
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
536
+ return cls(encoder=encoder, decoder=decoder, config=config)
537
+
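# Illustrative sketch of the kwarg routing above (hypothetical argument values):
#
#   model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google-bert/bert-base-uncased",
#       "google-bert/bert-base-uncased",
#       encoder_hidden_dropout_prob=0.2,          # "encoder_" is stripped; value applied to the encoder config
#       decoder_attention_probs_dropout_prob=0.2, # "decoder_" is stripped; value applied to the decoder config
#       tie_encoder_decoder=True,                 # no prefix: forwarded to the shared EncoderDecoderConfig
#   )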
538
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
539
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
540
+ def forward(
541
+ self,
542
+ input_ids: Optional[torch.LongTensor] = None,
543
+ attention_mask: Optional[torch.FloatTensor] = None,
544
+ decoder_input_ids: Optional[torch.LongTensor] = None,
545
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
546
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
547
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
548
+ inputs_embeds: Optional[torch.FloatTensor] = None,
549
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
550
+ labels: Optional[torch.LongTensor] = None,
551
+ use_cache: Optional[bool] = None,
552
+ output_attentions: Optional[bool] = None,
553
+ output_hidden_states: Optional[bool] = None,
554
+ return_dict: Optional[bool] = None,
555
+ **kwargs,
556
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
557
+ r"""
558
+ Returns:
559
+
560
+ Examples:
561
+
562
+ ```python
563
+ >>> from transformers import EncoderDecoderModel, BertTokenizer
564
+ >>> import torch
565
+
566
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
567
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
568
+ ... "google-bert/bert-base-uncased", "google-bert/bert-base-uncased"
569
+ ... ) # initialize Bert2Bert from pre-trained checkpoints
570
+
571
+ >>> # training
572
+ >>> model.config.decoder_start_token_id = tokenizer.cls_token_id
573
+ >>> model.config.pad_token_id = tokenizer.pad_token_id
574
+ >>> model.config.vocab_size = model.config.decoder.vocab_size
575
+
576
+ >>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
577
+ >>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
578
+ >>> outputs = model(input_ids=input_ids, labels=labels)
579
+ >>> loss, logits = outputs.loss, outputs.logits
580
+
581
+ >>> # save and load from pretrained
582
+ >>> model.save_pretrained("bert2bert")
583
+ >>> model = EncoderDecoderModel.from_pretrained("bert2bert")
584
+
585
+ >>> # generation
586
+ >>> generated = model.generate(input_ids)
587
+ ```"""
588
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
589
+
590
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
591
+
592
+ kwargs_decoder = {
593
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
594
+ }
595
+
596
+ if encoder_outputs is None:
597
+ encoder_outputs = self.encoder(
598
+ input_ids=input_ids,
599
+ attention_mask=attention_mask,
600
+ inputs_embeds=inputs_embeds,
601
+ output_attentions=output_attentions,
602
+ output_hidden_states=output_hidden_states,
603
+ return_dict=return_dict,
604
+ **kwargs_encoder,
605
+ )
606
+ elif isinstance(encoder_outputs, tuple):
607
+ encoder_outputs = BaseModelOutput(*encoder_outputs)
608
+
609
+ encoder_hidden_states = encoder_outputs[0]
610
+
611
+ # optionally project encoder_hidden_states
612
+ if (
613
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
614
+ and self.decoder.config.cross_attention_hidden_size is None
615
+ ):
616
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
617
+
618
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
619
+ decoder_input_ids = shift_tokens_right(
620
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
621
+ )
622
+ if decoder_attention_mask is None:
623
+ decoder_attention_mask = decoder_input_ids.new_tensor(decoder_input_ids != self.config.pad_token_id)
624
+
625
+ # Decode
626
+ decoder_outputs = self.decoder(
627
+ input_ids=decoder_input_ids,
628
+ attention_mask=decoder_attention_mask,
629
+ encoder_hidden_states=encoder_hidden_states,
630
+ encoder_attention_mask=attention_mask,
631
+ inputs_embeds=decoder_inputs_embeds,
632
+ output_attentions=output_attentions,
633
+ output_hidden_states=output_hidden_states,
634
+ use_cache=use_cache,
635
+ past_key_values=past_key_values,
636
+ return_dict=return_dict,
637
+ **kwargs_decoder,
638
+ )
639
+
640
+ # Compute loss independent from decoder (as some shift the logits inside them)
641
+ loss = None
642
+ if labels is not None:
643
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
644
+ logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
645
+ loss_fct = CrossEntropyLoss()
646
+ loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
647
+
648
+ if not return_dict:
649
+ if loss is not None:
650
+ return (loss,) + decoder_outputs + encoder_outputs
651
+ else:
652
+ return decoder_outputs + encoder_outputs
653
+
654
+ return Seq2SeqLMOutput(
655
+ loss=loss,
656
+ logits=decoder_outputs.logits,
657
+ past_key_values=decoder_outputs.past_key_values,
658
+ decoder_hidden_states=decoder_outputs.hidden_states,
659
+ decoder_attentions=decoder_outputs.attentions,
660
+ cross_attentions=decoder_outputs.cross_attentions,
661
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
662
+ encoder_hidden_states=encoder_outputs.hidden_states,
663
+ encoder_attentions=encoder_outputs.attentions,
664
+ )
665
+
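# Illustrative note on the loss path above (hedged summary): when `labels` are given without
# `decoder_input_ids`, the labels are shifted right to build the decoder inputs, and
# CrossEntropyLoss (default ignore_index=-100) is applied to the decoder logits against the
# unshifted labels, so positions labeled -100 do not contribute to the loss.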
666
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
667
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
668
+
669
+ def prepare_inputs_for_generation(
670
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
671
+ ):
672
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
673
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
674
+ input_dict = {
675
+ "attention_mask": attention_mask,
676
+ "decoder_attention_mask": decoder_attention_mask,
677
+ "decoder_input_ids": decoder_inputs["input_ids"],
678
+ "encoder_outputs": encoder_outputs,
679
+ "past_key_values": decoder_inputs["past_key_values"],
680
+ "use_cache": use_cache,
681
+ }
682
+ return input_dict
683
+
684
+ def resize_token_embeddings(self, *args, **kwargs):
685
+ raise NotImplementedError(
686
+ "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
687
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
688
+ " model.decoder.resize_token_embeddings(...))"
689
+ )
690
+
691
+ def _reorder_cache(self, past_key_values, beam_idx):
692
+ # apply decoder cache reordering here
693
+ return self.decoder._reorder_cache(past_key_values, beam_idx)
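# Illustrative note (hedged): during beam search, generate() passes `beam_idx` (the indices of
# the surviving beams) to _reorder_cache so the wrapped decoder can reindex its cached
# key/value states accordingly.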
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py ADDED
@@ -0,0 +1,899 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Classes to support Flax Encoder-Decoder architectures"""
16
+
17
+
18
+ import os
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import flax.linen as nn
22
+ import jax
23
+ import jax.numpy as jnp
24
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+ from jax.random import PRNGKey
28
+
29
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
30
+ from ...modeling_flax_utils import FlaxPreTrainedModel
31
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
32
+ from ..auto.configuration_auto import AutoConfig
33
+ from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM
34
+ from .configuration_encoder_decoder import EncoderDecoderConfig
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ _CONFIG_FOR_DOC = "EncoderDecoderConfig"
40
+
41
+ ENCODER_DECODER_START_DOCSTRING = r"""
42
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
43
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
44
+ [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
45
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
46
+ generative task, like summarization.
47
+
48
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
49
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
50
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
51
+ Zhou, Wei Li, Peter J. Liu.
52
+
53
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model
54
+ (see the examples for more information).
55
+
56
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
57
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
58
+ etc.)
59
+
60
+ This model is also a Flax Linen
61
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
62
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
63
+
64
+ Parameters:
65
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
66
+ Initializing with a config file does not load the weights associated with the model, only the
67
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
68
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
69
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
70
+ `jax.numpy.bfloat16` (on TPUs).
71
+
72
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
73
+ specified all the computation will be performed with the given `dtype`.
74
+
75
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
76
+ parameters.**
77
+
78
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
79
+ [`~FlaxPreTrainedModel.to_bf16`].
80
+ """
81
+
82
+ ENCODER_DECODER_INPUTS_DOCSTRING = r"""
83
+ Args:
84
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
85
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
86
+ it.
87
+
88
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
89
+ [`PreTrainedTokenizer.__call__`] for details.
90
+
91
+ [What are input IDs?](../glossary#input-ids)
92
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
93
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
94
+
95
+ - 1 for tokens that are **not masked**,
96
+ - 0 for tokens that are **masked**.
97
+
98
+ [What are attention masks?](../glossary#attention-mask)
99
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
100
+ Indices of decoder input sequence tokens in the vocabulary.
101
+
102
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
103
+ [`PreTrainedTokenizer.__call__`] for details.
104
+
105
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
106
+
107
+ For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
108
+ created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
109
+ and prepending them with the `decoder_start_token_id`.
110
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
111
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
112
+ be used by default.
113
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
114
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
115
+ config.encoder.max_position_embeddings - 1]`.
116
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
117
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
118
+ range `[0, config.decoder.max_position_embeddings - 1]`.
119
+ output_attentions (`bool`, *optional*):
120
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
121
+ tensors for more detail.
122
+ output_hidden_states (`bool`, *optional*):
123
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
124
+ more detail.
125
+ return_dict (`bool`, *optional*):
126
+ If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple.
127
+ """
128
+
129
+ ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r"""
130
+ Args:
131
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
132
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
133
+ it.
134
+
135
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
136
+ [`PreTrainedTokenizer.__call__`] for details.
137
+
138
+ [What are input IDs?](../glossary#input-ids)
139
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
140
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
141
+
142
+ - 1 for tokens that are **not masked**,
143
+ - 0 for tokens that are **masked**.
144
+
145
+ [What are attention masks?](../glossary#attention-mask)
146
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
147
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
148
+ config.encoder.max_position_embeddings - 1]`.
149
+ output_attentions (`bool`, *optional*):
150
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
151
+ tensors for more detail.
152
+ output_hidden_states (`bool`, *optional*):
153
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
154
+ more detail.
155
+ return_dict (`bool`, *optional*):
156
+ If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple.
157
+ """
158
+
159
+ ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r"""
160
+ Args:
161
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
162
+ Indices of decoder input sequence tokens in the vocabulary.
163
+
164
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
165
+ [`PreTrainedTokenizer.__call__`] for details.
166
+
167
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
168
+
169
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
170
+ `past_key_values`).
171
+
172
+ For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
173
+ created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
174
+ and prepending them with the `decoder_start_token_id`.
175
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
176
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
177
+ `last_hidden_state` (of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
178
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
179
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
180
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
181
+
182
+ - 1 for tokens that are **not masked**,
183
+ - 0 for tokens that are **masked**.
184
+
185
+ [What are attention masks?](../glossary#attention-mask)
186
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
187
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
188
+ be used by default.
189
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
190
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
191
+ range `[0, config.decoder.max_position_embeddings - 1]`.
192
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
193
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
194
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
195
+ output_attentions (`bool`, *optional*):
196
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
197
+ tensors for more detail.
198
+ output_hidden_states (`bool`, *optional*):
199
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
200
+ more detail.
201
+ return_dict (`bool`, *optional*):
202
+ If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a
203
+ plain tuple.
204
+ """
205
+
206
+
207
+ class FlaxEncoderDecoderModule(nn.Module):
208
+ config: EncoderDecoderConfig
209
+ dtype: jnp.dtype = jnp.float32
210
+
211
+ def setup(self):
212
+ encoder_config = self.config.encoder
213
+ decoder_config = self.config.decoder
214
+
215
+ # Copied from `modeling_hybrid_clip.py` with modifications.
216
+ from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING
217
+
218
+ encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class
219
+ decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class
220
+
221
+ self.encoder = encoder_module(encoder_config, dtype=self.dtype)
222
+ self.decoder = decoder_module(decoder_config, dtype=self.dtype)
223
+
224
+ # encoder outputs might need to be projected to different dimension for decoder
225
+ if (
226
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
227
+ and self.decoder.config.cross_attention_hidden_size is None
228
+ ):
229
+ self.enc_to_dec_proj = nn.Dense(
230
+ self.decoder.config.hidden_size,
231
+ kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range),
232
+ dtype=self.dtype,
233
+ )
234
+ else:
235
+ self.enc_to_dec_proj = None
236
+
237
+ def _get_encoder_module(self):
238
+ return self.encoder
239
+
240
+ def _get_projection_module(self):
241
+ return self.enc_to_dec_proj
242
+
243
+ def _get_decoder_module(self):
244
+ return self.decoder
245
+
246
+ def __call__(
247
+ self,
248
+ input_ids,
249
+ attention_mask,
250
+ decoder_input_ids,
251
+ decoder_attention_mask,
252
+ position_ids,
253
+ decoder_position_ids,
254
+ output_attentions: bool = False,
255
+ output_hidden_states: bool = False,
256
+ return_dict: bool = True,
257
+ deterministic: bool = True,
258
+ ):
259
+ encoder_outputs = self.encoder(
260
+ input_ids=input_ids,
261
+ attention_mask=attention_mask,
262
+ position_ids=position_ids,
263
+ output_attentions=output_attentions,
264
+ output_hidden_states=output_hidden_states,
265
+ return_dict=return_dict,
266
+ deterministic=deterministic,
267
+ )
268
+
269
+ encoder_hidden_states = encoder_outputs[0]
270
+
271
+ # optionally project encoder_hidden_states
272
+ if self.enc_to_dec_proj is not None:
273
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
274
+
275
+ decoder_outputs = self.decoder(
276
+ input_ids=decoder_input_ids,
277
+ attention_mask=decoder_attention_mask,
278
+ position_ids=decoder_position_ids,
279
+ encoder_hidden_states=encoder_hidden_states,
280
+ encoder_attention_mask=attention_mask,
281
+ output_attentions=output_attentions,
282
+ output_hidden_states=output_hidden_states,
283
+ return_dict=return_dict,
284
+ deterministic=deterministic,
285
+ )
286
+
287
+ if not return_dict:
288
+ return decoder_outputs + encoder_outputs
289
+
290
+ return FlaxSeq2SeqLMOutput(
291
+ logits=decoder_outputs.logits,
292
+ decoder_hidden_states=decoder_outputs.hidden_states,
293
+ decoder_attentions=decoder_outputs.attentions,
294
+ cross_attentions=decoder_outputs.cross_attentions,
295
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
296
+ encoder_hidden_states=encoder_outputs.hidden_states,
297
+ encoder_attentions=encoder_outputs.attentions,
298
+ )
299
+
300
+
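# Illustrative note on the module above (hedged summary): __call__ runs the encoder, optionally
# projects its hidden states through enc_to_dec_proj when the hidden sizes differ, and feeds
# them to the decoder as encoder_hidden_states, reusing the encoder attention_mask as the
# cross-attention mask.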
301
+ @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
302
+ class FlaxEncoderDecoderModel(FlaxPreTrainedModel):
303
+ r"""
304
+ [`FlaxEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
305
+ the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as
306
+ decoder module when created with the [`~FlaxAutoModel.from_pretrained`] class method for the
307
+ encoder and the [`~FlaxAutoModelForCausalLM.from_pretrained`] class method for the decoder.
308
+ """
309
+
310
+ config_class = EncoderDecoderConfig
311
+ base_model_prefix = "encoder_decoder"
312
+ module_class = FlaxEncoderDecoderModule
313
+
314
+ def __init__(
315
+ self,
316
+ config: EncoderDecoderConfig,
317
+ input_shape: Optional[Tuple] = None,
318
+ seed: int = 0,
319
+ dtype: jnp.dtype = jnp.float32,
320
+ _do_init: bool = True,
321
+ **kwargs,
322
+ ):
323
+ if input_shape is None:
324
+ input_shape = ((1, 1), (1, 1))
325
+
326
+ if not _do_init:
327
+ raise ValueError(
328
+ "`FlaxEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`."
329
+ )
330
+
331
+ if config.decoder.cross_attention_hidden_size is not None:
332
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
333
+ raise ValueError(
334
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
335
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
336
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
337
+ " `config.encoder.hidden_size`."
338
+ )
339
+
340
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
341
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
342
+
343
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
344
+ encoder_input_shape, decoder_input_shape = input_shape
345
+
346
+ # init input tensors
347
+ input_ids = jnp.zeros(encoder_input_shape, dtype="i4")
348
+ attention_mask = jnp.ones_like(input_ids)
349
+ decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4")
350
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
351
+
352
+ batch_size, sequence_length = input_ids.shape
353
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
354
+
355
+ decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape
356
+ if not decoder_batch_size == batch_size:
357
+ raise ValueError(
358
+ f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder"
359
+ f" and {decoder_batch_size} for decoder."
360
+ )
361
+ decoder_position_ids = jnp.broadcast_to(
362
+ jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)
363
+ )
364
+
365
+ params_rng, dropout_rng = jax.random.split(rng)
366
+ rngs = {"params": params_rng, "dropout": dropout_rng}
367
+
368
+ random_params = self.module.init(
369
+ rngs,
370
+ input_ids,
371
+ attention_mask,
372
+ decoder_input_ids,
373
+ decoder_attention_mask,
374
+ position_ids,
375
+ decoder_position_ids,
376
+ )["params"]
377
+
378
+ if params is not None:
379
+ random_params = flatten_dict(unfreeze(random_params))
380
+ params = flatten_dict(unfreeze(params))
381
+ for missing_key in self._missing_keys:
382
+ params[missing_key] = random_params[missing_key]
383
+ self._missing_keys = set()
384
+ return freeze(unflatten_dict(params))
385
+ else:
386
+ return random_params
387
+
388
+ def init_cache(self, batch_size, max_length, encoder_outputs):
389
+ r"""
390
+ Args:
391
+ batch_size (`int`):
392
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
393
+ max_length (`int`):
394
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
395
+ cache.
396
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
397
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
398
+ `attentions`). `last_hidden_state` (of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
399
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
400
+ cross-attention of the decoder.
401
+ """
402
+ # init input variables to retrieve cache
403
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
404
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
405
+ decoder_position_ids = jnp.broadcast_to(
406
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
407
+ )
408
+
409
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
410
+ decoder_module = module._get_decoder_module()
411
+ return decoder_module(
412
+ input_ids=decoder_input_ids,
413
+ attention_mask=decoder_attention_mask,
414
+ position_ids=decoder_position_ids,
415
+ **kwargs,
416
+ )
417
+
418
+ init_variables = self.module.init(
419
+ jax.random.PRNGKey(0),
420
+ decoder_input_ids=decoder_input_ids,
421
+ decoder_attention_mask=decoder_attention_mask,
422
+ decoder_position_ids=decoder_position_ids,
423
+ encoder_hidden_states=encoder_outputs[0],
424
+ init_cache=True,
425
+ method=_decoder_forward, # we only need to call the decoder to init the cache
426
+ )
427
+ return unfreeze(init_variables["cache"])
428
+
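# Illustrative usage sketch for init_cache (hypothetical sizes; generate() normally handles this
# internally):
#
#   encoder_outputs = model.encode(input_ids)
#   past_key_values = model.init_cache(input_ids.shape[0], max_length=32, encoder_outputs=encoder_outputs)
#   # past_key_values can then be passed to model.decode(...) for step-by-step decoding.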
429
+ @add_start_docstrings(ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING)
430
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC)
431
+ def encode(
432
+ self,
433
+ input_ids: jnp.ndarray,
434
+ attention_mask: Optional[jnp.ndarray] = None,
435
+ position_ids: Optional[jnp.ndarray] = None,
436
+ output_attentions: Optional[bool] = None,
437
+ output_hidden_states: Optional[bool] = None,
438
+ return_dict: Optional[bool] = None,
439
+ train: bool = False,
440
+ params: dict = None,
441
+ dropout_rng: PRNGKey = None,
442
+ ):
443
+ r"""
444
+ Returns:
445
+
446
+ Example:
447
+
448
+ ```python
449
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer
450
+
451
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
452
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
453
+
454
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
455
+
456
+ >>> text = "My friends are cool but they eat too many carbs."
457
+ >>> input_ids = tokenizer.encode(text, return_tensors="np")
458
+ >>> encoder_outputs = model.encode(input_ids)
459
+ ```"""
460
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
461
+ output_hidden_states = (
462
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
463
+ )
464
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
465
+
466
+ if attention_mask is None:
467
+ attention_mask = jnp.ones_like(input_ids)
468
+ if position_ids is None:
469
+ batch_size, sequence_length = input_ids.shape
470
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
471
+
472
+ # Handle any PRNG if needed
473
+ rngs = {}
474
+ if dropout_rng is not None:
475
+ rngs["dropout"] = dropout_rng
476
+
477
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
478
+ encode_module = module._get_encoder_module()
479
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
480
+
481
+ outputs = self.module.apply(
482
+ {"params": params or self.params},
483
+ input_ids=jnp.array(input_ids, dtype="i4"),
484
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
485
+ position_ids=jnp.array(position_ids, dtype="i4"),
486
+ output_attentions=output_attentions,
487
+ output_hidden_states=output_hidden_states,
488
+ return_dict=return_dict,
489
+ deterministic=not train,
490
+ rngs=rngs,
491
+ method=_encoder_forward,
492
+ )
493
+
494
+ if return_dict:
495
+ outputs = FlaxBaseModelOutput(
496
+ last_hidden_state=outputs.last_hidden_state,
497
+ hidden_states=outputs.hidden_states,
498
+ attentions=outputs.attentions,
499
+ )
500
+
501
+ return outputs
502
+
503
+ @add_start_docstrings(ENCODER_DECODER_DECODE_INPUTS_DOCSTRING)
504
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
505
+ def decode(
506
+ self,
507
+ decoder_input_ids,
508
+ encoder_outputs,
509
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
510
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
511
+ decoder_position_ids: Optional[jnp.ndarray] = None,
512
+ past_key_values: dict = None,
513
+ output_attentions: Optional[bool] = None,
514
+ output_hidden_states: Optional[bool] = None,
515
+ return_dict: Optional[bool] = None,
516
+ train: bool = False,
517
+ params: dict = None,
518
+ dropout_rng: PRNGKey = None,
519
+ ):
520
+ r"""
521
+ Returns:
522
+
523
+ Example:
524
+
525
+ ```python
526
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer
527
+ >>> import jax.numpy as jnp
528
+
529
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
530
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
531
+
532
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
533
+
534
+ >>> text = "My friends are cool but they eat too many carbs."
535
+ >>> input_ids = tokenizer.encode(text, max_length=1024, return_tensors="np")
536
+ >>> encoder_outputs = model.encode(input_ids)
537
+
538
+ >>> decoder_start_token_id = model.config.decoder.bos_token_id
539
+ >>> decoder_input_ids = jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
540
+
541
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
542
+ >>> logits = outputs.logits
543
+ ```"""
544
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
545
+ output_hidden_states = (
546
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
547
+ )
548
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
549
+
550
+ encoder_hidden_states = encoder_outputs[0]
551
+ if encoder_attention_mask is None:
552
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
553
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
554
+
555
+ batch_size, sequence_length = decoder_input_ids.shape
556
+ if decoder_attention_mask is None:
557
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
558
+
559
+ if decoder_position_ids is None:
560
+ if past_key_values is not None:
561
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
562
+
563
+ decoder_position_ids = jnp.broadcast_to(
564
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
565
+ )
566
+
567
+ # Handle any PRNG if needed
568
+ rngs = {}
569
+ if dropout_rng is not None:
570
+ rngs["dropout"] = dropout_rng
571
+
572
+ inputs = {"params": params or self.params}
573
+
574
+ # if past_key_values are passed then the cache is already initialized; a private flag init_cache has to be
575
+ # passed down to ensure the cache is used. It has to be made sure that the cache is marked as mutable so that
576
+ # it can be changed by the FlaxBartAttention module
577
+ if past_key_values:
578
+ inputs["cache"] = past_key_values
579
+ mutable = ["cache"]
580
+ else:
581
+ mutable = False
582
+
583
+ def _decoder_forward(
584
+ module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs
585
+ ):
586
+ projection_module = module._get_projection_module()
587
+ decoder_module = module._get_decoder_module()
588
+
589
+ # optionally project encoder_hidden_states
590
+ if projection_module is not None:
591
+ encoder_hidden_states = projection_module(encoder_hidden_states)
592
+
593
+ return decoder_module(
594
+ decoder_input_ids,
595
+ decoder_attention_mask,
596
+ decoder_position_ids,
597
+ encoder_hidden_states=encoder_hidden_states,
598
+ **kwargs,
599
+ )
600
+
601
+ outputs = self.module.apply(
602
+ inputs,
603
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
604
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
605
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
606
+ encoder_hidden_states=encoder_hidden_states,
607
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
608
+ output_attentions=output_attentions,
609
+ output_hidden_states=output_hidden_states,
610
+ return_dict=return_dict,
611
+ deterministic=not train,
612
+ rngs=rngs,
613
+ mutable=mutable,
614
+ method=_decoder_forward,
615
+ )
616
+
617
+ # add updated cache to model output
618
+ if past_key_values is not None and return_dict:
619
+ outputs, past = outputs
620
+ outputs["past_key_values"] = unfreeze(past["cache"])
621
+ return outputs
622
+ elif past_key_values is not None and not return_dict:
623
+ outputs, past = outputs
624
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
625
+
626
+ return outputs
627
+
628
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
629
+ @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
630
+ def __call__(
631
+ self,
632
+ input_ids: jnp.ndarray,
633
+ attention_mask: Optional[jnp.ndarray] = None,
634
+ decoder_input_ids: Optional[jnp.ndarray] = None,
635
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
636
+ position_ids: Optional[jnp.ndarray] = None,
637
+ decoder_position_ids: Optional[jnp.ndarray] = None,
638
+ output_attentions: Optional[bool] = None,
639
+ output_hidden_states: Optional[bool] = None,
640
+ return_dict: Optional[bool] = None,
641
+ train: bool = False,
642
+ params: dict = None,
643
+ dropout_rng: PRNGKey = None,
644
+ ):
645
+ r"""
646
+ Returns:
647
+
648
+ Examples:
649
+
650
+ ```python
651
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer, GPT2Tokenizer
652
+
653
+ >>> # load a fine-tuned bert2gpt2 model
654
+ >>> model = FlaxEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16")
655
+ >>> # load input & output tokenizer
656
+ >>> tokenizer_input = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
657
+ >>> tokenizer_output = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
658
+
659
+ >>> article = '''Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members
660
+ >>> singing a racist chant. SAE's national chapter suspended the students,
661
+ >>> but University of Oklahoma President David Boren took it a step further,
662
+ >>> saying the university's affiliation with the fraternity is permanently done.'''
663
+
664
+ >>> input_ids = tokenizer_input(article, add_special_tokens=True, return_tensors="np").input_ids
665
+
666
+ >>> # use GPT2's eos_token as the pad as well as eos token
667
+ >>> model.config.eos_token_id = model.config.decoder.eos_token_id
668
+ >>> model.config.pad_token_id = model.config.eos_token_id
669
+
670
+ >>> sequences = model.generate(input_ids, num_beams=4, max_length=12).sequences
671
+
672
+ >>> summary = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)[0]
673
+ >>> assert summary == "SAS Alpha Epsilon suspended Sigma Alpha Epsilon members"
674
+ ```
675
+ """
676
+
677
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
678
+ output_hidden_states = (
679
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
680
+ )
681
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
682
+
683
+ # prepare encoder inputs
684
+ if attention_mask is None:
685
+ attention_mask = jnp.ones_like(input_ids)
686
+ if position_ids is None:
687
+ batch_size, sequence_length = input_ids.shape
688
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
689
+
690
+ # prepare decoder inputs
691
+ if decoder_input_ids is None:
692
+ raise ValueError(
693
+ "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must"
694
+ " be specified as an input argument."
695
+ )
696
+ if decoder_attention_mask is None:
697
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
698
+ if decoder_position_ids is None:
699
+ batch_size, sequence_length = decoder_input_ids.shape
700
+ decoder_position_ids = jnp.broadcast_to(
701
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
702
+ )
703
+
704
+ # Handle any PRNG if needed
705
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
706
+
707
+ return self.module.apply(
708
+ {"params": params or self.params},
709
+ input_ids=jnp.array(input_ids, dtype="i4"),
710
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
711
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
712
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
713
+ position_ids=jnp.array(position_ids, dtype="i4"),
714
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
715
+ output_attentions=output_attentions,
716
+ output_hidden_states=output_hidden_states,
717
+ return_dict=return_dict,
718
+ deterministic=not train,
719
+ rngs=rngs,
720
+ )
721
+
722
+ def prepare_inputs_for_generation(
723
+ self,
724
+ decoder_input_ids,
725
+ max_length,
726
+ attention_mask: Optional[jax.Array] = None,
727
+ decoder_attention_mask: Optional[jax.Array] = None,
728
+ encoder_outputs=None,
729
+ **kwargs,
730
+ ):
731
+ # initializing the cache
732
+ batch_size, seq_length = decoder_input_ids.shape
733
+
734
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
735
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
736
+ # But since the decoder uses a causal mask, those positions are masked anyways.
737
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
738
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
739
+ if decoder_attention_mask is not None:
740
+ decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
741
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
742
+ else:
743
+ decoder_position_ids = jnp.broadcast_to(
744
+ jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
745
+ )
746
+
747
+ return {
748
+ "past_key_values": past_key_values,
749
+ "encoder_outputs": encoder_outputs,
750
+ "encoder_attention_mask": attention_mask,
751
+ "decoder_attention_mask": extended_attention_mask,
752
+ "decoder_position_ids": decoder_position_ids,
753
+ }
754
+
755
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
756
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
757
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
758
+ return model_kwargs
759
+
760
+ @classmethod
761
+ def from_encoder_decoder_pretrained(
762
+ cls,
763
+ encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
764
+ decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
765
+ *model_args,
766
+ **kwargs,
767
+ ) -> FlaxPreTrainedModel:
768
+ r"""
769
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
770
+ checkpoints.
771
+
772
+ Params:
773
+ encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*):
774
+ Information necessary to initiate the encoder. Can be either:
775
+
776
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
777
+ - A path to a *directory* containing model weights saved using
778
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
779
+
780
+ decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`):
781
+ Information necessary to initiate the decoder. Can be either:
782
+
783
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
784
+ - A path to a *directory* containing model weights saved using
785
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
786
+
787
+ model_args (remaining positional arguments, *optional*):
788
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
789
+
790
+ kwargs (remaining dictionary of keyword arguments, *optional*):
791
+ Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
792
+ `output_attentions=True`).
793
+
794
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
795
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
796
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
797
+
798
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
799
+
800
+ Example:
801
+
802
+ ```python
803
+ >>> from transformers import FlaxEncoderDecoderModel
804
+
805
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
806
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
807
+ >>> # saving model after fine-tuning
808
+ >>> model.save_pretrained("./bert2gpt2")
809
+ >>> # load fine-tuned model
810
+ >>> model = FlaxEncoderDecoderModel.from_pretrained("./bert2gpt2")
811
+ ```"""
812
+
813
+ kwargs_encoder = {
814
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
815
+ }
816
+
817
+ kwargs_decoder = {
818
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
819
+ }
820
+
821
+ # remove encoder, decoder kwargs from kwargs
822
+ for key in kwargs_encoder.keys():
823
+ del kwargs["encoder_" + key]
824
+ for key in kwargs_decoder.keys():
825
+ del kwargs["decoder_" + key]
826
+
827
+ # Load and initialize the encoder and decoder
828
+ # The distinction between encoder and decoder at the model level is made
829
+ # by the value of the flag `is_decoder` that we need to set correctly.
830
+ encoder = kwargs_encoder.pop("model", None)
831
+ if encoder is None:
832
+ if encoder_pretrained_model_name_or_path is None:
833
+ raise ValueError(
834
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
835
+ "to be defined."
836
+ )
837
+
838
+ if "config" not in kwargs_encoder:
839
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
840
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
841
+ )
842
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
843
+ logger.info(
844
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
845
+ "from a decoder model. Cross-attention and casual mask are disabled."
846
+ )
847
+ encoder_config.is_decoder = False
848
+ encoder_config.add_cross_attention = False
849
+
850
+ kwargs_encoder["config"] = encoder_config
851
+
852
+ encoder = FlaxAutoModel.from_pretrained(
853
+ encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
854
+ )
855
+
856
+ decoder = kwargs_decoder.pop("model", None)
857
+ if decoder is None:
858
+ if decoder_pretrained_model_name_or_path is None:
859
+ raise ValueError(
860
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
861
+ "to be defined."
862
+ )
863
+
864
+ if "config" not in kwargs_decoder:
865
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
866
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
867
+ )
868
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
869
+ logger.info(
870
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
871
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
872
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
873
+ )
874
+ decoder_config.is_decoder = True
875
+ decoder_config.add_cross_attention = True
876
+
877
+ kwargs_decoder["config"] = decoder_config
878
+
879
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
880
+ logger.warning(
881
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
882
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
883
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
884
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
885
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
886
+ )
887
+
888
+ decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
889
+
890
+ # instantiate config with corresponding kwargs
891
+ dtype = kwargs.pop("dtype", jnp.float32)
892
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
893
+
894
+ # init model
895
+ model = cls(config, dtype=dtype)
896
+ model.params["encoder"] = encoder.params
897
+ model.params["decoder"] = decoder.params
898
+
899
+ return model
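For reference, here is a minimal greedy-decoding sketch that exercises the `encode`, `init_cache`, and `decode` methods defined above, mirroring what `prepare_inputs_for_generation` / `update_inputs_for_generation` set up for `generate`. It reuses the checkpoints from the docstring examples; the 16-step budget and the greedy argmax are illustrative assumptions only, and in practice `model.generate(...)` wraps this loop.

```python
import jax.numpy as jnp
from transformers import BertTokenizer, FlaxEncoderDecoderModel

model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google-bert/bert-base-cased", "openai-community/gpt2"
)
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")

input_ids = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="np").input_ids
encoder_outputs = model.encode(input_ids)

batch_size, max_length = input_ids.shape[0], 16  # illustrative generation budget
# start from the decoder's BOS token and pre-allocate the cache for `max_length` steps
generated = jnp.full((batch_size, 1), model.config.decoder.bos_token_id, dtype="i4")
past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)
# a single static attention mask over the whole cache, as in `prepare_inputs_for_generation`
decoder_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")

for step in range(max_length - 1):
    outputs = model.decode(
        generated[:, -1:],  # only the newest token; earlier tokens live in the cache
        encoder_outputs,
        decoder_attention_mask=decoder_attention_mask,
        decoder_position_ids=jnp.full((batch_size, 1), step, dtype="i4"),
        past_key_values=past_key_values,
    )
    past_key_values = outputs.past_key_values  # updated cache returned by `decode`
    next_token = jnp.argmax(outputs.logits[:, -1, :], axis=-1)[:, None]
    generated = jnp.concatenate([generated, next_token], axis=-1)
```

Feeding only the newest token together with `past_key_values` keeps the per-step cost independent of how much has already been generated, which is the point of the cache machinery above.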
venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py ADDED
@@ -0,0 +1,663 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Classes to support TF Encoder-Decoder architectures"""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import inspect
21
+ import re
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...configuration_utils import PretrainedConfig
29
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
30
+ from ...modeling_tf_utils import (
31
+ TFCausalLanguageModelingLoss,
32
+ TFModelInputType,
33
+ TFPreTrainedModel,
34
+ get_initializer,
35
+ keras,
36
+ unpack_inputs,
37
+ )
38
+ from ...tf_utils import shape_list
39
+ from ...utils import (
40
+ ModelOutput,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from ..auto.configuration_auto import AutoConfig
47
+ from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
48
+ from .configuration_encoder_decoder import EncoderDecoderConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CONFIG_FOR_DOC = "EncoderDecoderConfig"
54
+
55
+ DEPRECATION_WARNING = (
56
+ "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
57
+ " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
58
+ " fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the"
59
+ " labels, no need to pass them yourself anymore."
60
+ )
61
+
62
+ ENCODER_DECODER_START_DOCSTRING = r"""
63
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
64
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
65
+ [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]
66
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
67
+ generative task, like summarization.
68
+
69
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
70
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
71
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei
72
+ Severyn.
73
+
74
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
75
+ (see the examples for more information).
76
+
77
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
78
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
79
+ etc.)
80
+
81
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
82
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
83
+ behavior.
84
+
85
+ Parameters:
86
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
87
+ Initializing with a config file does not load the weights associated with the model, only the
88
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
89
+ """
90
+
91
+ ENCODER_DECODER_INPUTS_DOCSTRING = r"""
92
+ Args:
93
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
94
+ Indices of input sequence tokens in the vocabulary.
95
+
96
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
97
+ [`PreTrainedTokenizer.__call__`] for details.
98
+
99
+ [What are input IDs?](../glossary#input-ids)
100
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
101
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
102
+
103
+ - 1 for tokens that are **not masked**,
104
+ - 0 for tokens that are **masked**.
105
+
106
+ [What are attention masks?](../glossary#attention-mask)
107
+ decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Indices of decoder input sequence tokens in the vocabulary.
109
+
110
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
111
+ [`PreTrainedTokenizer.__call__`] for details.
112
+
113
+ [What are input IDs?](../glossary#input-ids)
114
+
115
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
116
+ `past_key_values`).
117
+
118
+ Provide for sequence to sequence training to the decoder. Indices can be obtained using
119
+ [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
120
+ details.
121
+ decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
122
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
123
+ be used by default.
124
+ encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
125
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
126
+ `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
127
+ of the last layer of the encoder. Used in the cross-attention of the decoder.
128
+ past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
129
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
130
+
131
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
132
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
133
+ `decoder_input_ids` of shape `({0})`.
134
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
135
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
136
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
137
+ model's internal embedding lookup matrix.
138
+ decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
139
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
140
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
141
+ into associated vectors than the model's internal embedding lookup matrix.
142
+ labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
143
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
144
+ ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
145
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
146
+ use_cache (`bool`, *optional*):
147
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
148
+ `past_key_values`).
149
+ output_attentions (`bool`, *optional*):
150
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
151
+ tensors for more detail.
152
+ output_hidden_states (`bool`, *optional*):
153
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
154
+ more detail.
155
+ return_dict (`bool`, *optional*):
156
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
157
+ training (`bool`, *optional*, defaults to `False`):
158
+ Whether or not to use the model in training mode (some modules like dropout modules have different
159
+ behaviors between training and evaluation).
160
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
161
+
162
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
163
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
164
+ """
165
+
166
+
167
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
168
+ if pad_token_id is None:
169
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
170
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
171
+
172
+ if decoder_start_token_id is None:
173
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
174
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
175
+
176
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
177
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
178
+ # replace possible -100 values in labels by `pad_token_id`
179
+ shifted_input_ids = tf.where(
180
+ shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
181
+ )
182
+
183
+ # "Verify that `labels` has only positive values and -100"
184
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
185
+
186
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
187
+ with tf.control_dependencies([assert_gte0]):
188
+ shifted_input_ids = tf.identity(shifted_input_ids)
189
+
190
+ return shifted_input_ids
191
+
192
+
193
+ @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
194
+ class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
195
+ r"""
196
+ [`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
197
+ of the base model classes of the library as encoder and another one as decoder when created with the
198
+ [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
199
+ method for the decoder.
200
+ """
201
+
202
+ config_class = EncoderDecoderConfig
203
+ base_model_prefix = "encoder_decoder"
204
+ load_weight_prefix = "tf_encoder_decoder_model"
205
+
206
+ def __init__(
207
+ self,
208
+ config: Optional[PretrainedConfig] = None,
209
+ encoder: Optional[TFPreTrainedModel] = None,
210
+ decoder: Optional[TFPreTrainedModel] = None,
211
+ ):
212
+ if config is None and (encoder is None or decoder is None):
213
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
214
+ if config is None:
215
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
216
+ else:
217
+ if not isinstance(config, self.config_class):
218
+ raise ValueError(f"config: {config} has to be of type {self.config_class}")
219
+
220
+ if config.decoder.cross_attention_hidden_size is not None:
221
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
222
+ raise ValueError(
223
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
224
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
225
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
226
+ " `config.encoder.hidden_size`."
227
+ )
228
+
229
+ # initialize with config
230
+ super().__init__(config)
231
+
232
+ if encoder is None:
233
+ encoder = TFAutoModel.from_config(config.encoder, name="encoder")
234
+
235
+ if decoder is None:
236
+ decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
237
+
238
+ self.encoder = encoder
239
+ self.decoder = decoder
240
+
241
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
242
+ logger.warning(
243
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
244
+ f" {self.config.encoder}"
245
+ )
246
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
247
+ logger.warning(
248
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
249
+ f" {self.config.decoder}"
250
+ )
251
+
252
+ # make sure that the individual model's config refers to the shared config
253
+ # so that the updates to the config will be synced
254
+ self.encoder.config = self.config.encoder
255
+ self.decoder.config = self.config.decoder
256
+
257
+ # encoder outputs might need to be projected to different dimension for decoder
258
+ if (
259
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
260
+ and self.decoder.config.cross_attention_hidden_size is None
261
+ ):
262
+ self.enc_to_dec_proj = keras.layers.Dense(
263
+ units=self.decoder.config.hidden_size,
264
+ kernel_initializer=get_initializer(config.encoder.initializer_range),
265
+ name="enc_to_dec_proj",
266
+ )
267
+
268
+ if self.encoder.get_output_embeddings() is not None:
269
+ raise ValueError(
270
+ f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
271
+ )
272
+
273
+ decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys())
274
+ if "encoder_hidden_states" not in decoder_signature:
275
+ raise ValueError(
276
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
277
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
278
+ )
279
+
280
+ def get_encoder(self):
281
+ return self.encoder
282
+
283
+ def get_decoder(self):
284
+ return self.decoder
285
+
286
+ def get_input_embeddings(self):
287
+ return self.encoder.get_input_embeddings()
288
+
289
+ def get_output_embeddings(self):
290
+ return self.decoder.get_output_embeddings()
291
+
292
+ def set_output_embeddings(self, new_embeddings):
293
+ return self.decoder.set_output_embeddings(new_embeddings)
294
+
295
+ def tf_to_pt_weight_rename(self, tf_weight):
296
+ # Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models
297
+ # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal.
298
+ # However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption
299
+ # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's
300
+ # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name!
301
+
302
+ # This override is only needed in the case where we're crossloading weights from PT. However, since weights are
303
+ # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file.
304
+ # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it
305
+ # or not.
306
+ encoder_model_type = self.config.encoder.model_type
307
+ if "encoder" in tf_weight and "decoder" not in tf_weight:
308
+ return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),)
309
+ else:
310
+ return (tf_weight,)
311
+
312
+ @classmethod
313
+ def from_encoder_decoder_pretrained(
314
+ cls,
315
+ encoder_pretrained_model_name_or_path: str = None,
316
+ decoder_pretrained_model_name_or_path: str = None,
317
+ *model_args,
318
+ **kwargs,
319
+ ) -> TFPreTrainedModel:
320
+ r"""
321
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
322
+ checkpoints.
323
+
324
+
325
+ Params:
326
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
327
+ Information necessary to initiate the encoder. Can be either:
328
+
329
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
330
+ - A path to a *directory* containing model weights saved using
331
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
332
+ - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
333
+ `encoder_from_pt` should be set to `True`.
334
+
335
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
336
+ Information necessary to initiate the decoder. Can be either:
337
+
338
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
339
+ - A path to a *directory* containing model weights saved using
340
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
341
+ - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
342
+ `decoder_from_pt` should be set to `True`.
343
+
344
+ model_args (remaining positional arguments, *optional*):
345
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
346
+
347
+ kwargs (remaining dictionary of keyword arguments, *optional*):
348
+ Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
349
+ `output_attentions=True`).
350
+
351
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
352
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
353
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
354
+
355
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
356
+
357
+ Example:
358
+
359
+ ```python
360
+ >>> from transformers import TFEncoderDecoderModel
361
+
362
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
363
+ >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "openai-community/gpt2")
364
+ >>> # saving model after fine-tuning
365
+ >>> model.save_pretrained("./bert2gpt2")
366
+ >>> # load fine-tuned model
367
+ >>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2")
368
+ ```"""
369
+
370
+ kwargs_encoder = {
371
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
372
+ }
373
+
374
+ kwargs_decoder = {
375
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
376
+ }
377
+
378
+ # remove encoder, decoder kwargs from kwargs
379
+ for key in kwargs_encoder.keys():
380
+ del kwargs["encoder_" + key]
381
+ for key in kwargs_decoder.keys():
382
+ del kwargs["decoder_" + key]
383
+
384
+ # Load and initialize the encoder and decoder
385
+ # The distinction between encoder and decoder at the model level is made
386
+ # by the value of the flag `is_decoder` that we need to set correctly.
387
+ encoder = kwargs_encoder.pop("model", None)
388
+ if encoder is None:
389
+ if encoder_pretrained_model_name_or_path is None:
390
+ raise ValueError(
391
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
392
+ "to be defined."
393
+ )
394
+
395
+ if "config" not in kwargs_encoder:
396
+ encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
397
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
398
+ logger.info(
399
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
400
+ "from a decoder model. Cross-attention and casual mask are disabled."
401
+ )
402
+ encoder_config.is_decoder = False
403
+ encoder_config.add_cross_attention = False
404
+
405
+ kwargs_encoder["config"] = encoder_config
406
+
407
+ kwargs_encoder["name"] = "encoder"
408
+ kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
409
+ encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
410
+
411
+ decoder = kwargs_decoder.pop("model", None)
412
+ if decoder is None:
413
+ if decoder_pretrained_model_name_or_path is None:
414
+ raise ValueError(
415
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
416
+ "to be defined."
417
+ )
418
+
419
+ if "config" not in kwargs_decoder:
420
+ decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
421
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
422
+ logger.info(
423
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
424
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
425
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
426
+ )
427
+ decoder_config.is_decoder = True
428
+ decoder_config.add_cross_attention = True
429
+
430
+ kwargs_decoder["config"] = decoder_config
431
+
432
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
433
+ logger.warning(
434
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
435
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
436
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
437
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
438
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
439
+ )
440
+
441
+ kwargs_decoder["name"] = "decoder"
442
+ kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
443
+ decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
444
+
445
+ # Make sure these 2 `keras.Model` have fixed names so `from_pretrained` could load model weights correctly.
446
+ if encoder.name != "encoder":
447
+ raise ValueError("encoder model must be created with the name `encoder`.")
448
+ if decoder.name != "decoder":
449
+ raise ValueError("decoder model must be created with the name `decoder`.")
450
+
451
+ # instantiate config with corresponding kwargs
452
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
453
+ return cls(encoder=encoder, decoder=decoder, config=config)
454
+
455
+ @unpack_inputs
456
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
457
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
458
+ def call(
459
+ self,
460
+ input_ids: TFModelInputType | None = None,
461
+ attention_mask: np.ndarray | tf.Tensor | None = None,
462
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
463
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
464
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
465
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
466
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
467
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
468
+ labels: np.ndarray | tf.Tensor | None = None,
469
+ use_cache: Optional[bool] = None,
470
+ output_attentions: Optional[bool] = None,
471
+ output_hidden_states: Optional[bool] = None,
472
+ return_dict: Optional[bool] = None,
473
+ training: bool = False,
474
+ **kwargs,
475
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
476
+ r"""
477
+ Returns:
478
+
479
+ Examples:
480
+
481
+ ```python
482
+ >>> from transformers import TFEncoderDecoderModel, BertTokenizer
483
+
484
+ >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
485
+ >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
486
+
487
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
488
+
489
+ >>> # forward
490
+ >>> input_ids = tokenizer.encode(
491
+ ... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
492
+ ... ) # Batch size 1
493
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
494
+
495
+ >>> # training
496
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
497
+ >>> loss, logits = outputs.loss, outputs.logits
498
+
499
+ >>> # save and load from pretrained
500
+ >>> model.save_pretrained("bert2gpt2")
501
+ >>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2")
502
+
503
+ >>> # generation
504
+ >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
505
+ ```"""
506
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
507
+
508
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
509
+
510
+ kwargs_decoder = {
511
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
512
+ }
513
+
514
+ # Let the user be responsible for the expected format.
515
+ if encoder_outputs is not None:
516
+ if return_dict and not isinstance(encoder_outputs, ModelOutput):
517
+ raise ValueError(
518
+ "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
519
+ f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
520
+ )
521
+
522
+ if encoder_outputs is None:
523
+ encoder_inputs = {
524
+ "input_ids": input_ids,
525
+ "attention_mask": attention_mask,
526
+ "inputs_embeds": inputs_embeds,
527
+ "output_attentions": output_attentions,
528
+ "output_hidden_states": output_hidden_states,
529
+ "return_dict": return_dict,
530
+ "training": training,
531
+ }
532
+
533
+ # Add arguments to encoder from `kwargs_encoder`
534
+ encoder_inputs.update(kwargs_encoder)
535
+
536
+ # Handle the case where the inputs are passed as a single dict which contains `labels`.
537
+ # The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
538
+ # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
539
+ if "labels" in encoder_inputs:
540
+ labels = encoder_inputs.pop("labels")
541
+
542
+ # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
543
+ if "decoder_input_ids" in encoder_inputs:
544
+ decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
545
+ # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
546
+ if "decoder_attention_mask" in encoder_inputs:
547
+ decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
548
+
549
+ encoder_outputs = self.encoder(**encoder_inputs)
550
+
551
+ encoder_hidden_states = encoder_outputs[0]
552
+
553
+ # optionally project encoder_hidden_states
554
+ if (
555
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
556
+ and self.decoder.config.cross_attention_hidden_size is None
557
+ ):
558
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
559
+
560
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
561
+ decoder_input_ids = shift_tokens_right(
562
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
563
+ )
564
+
565
+ decoder_inputs = {
566
+ "input_ids": decoder_input_ids,
567
+ "attention_mask": decoder_attention_mask,
568
+ "encoder_hidden_states": encoder_hidden_states,
569
+ "encoder_attention_mask": attention_mask,
570
+ "inputs_embeds": decoder_inputs_embeds,
571
+ "output_attentions": output_attentions,
572
+ "output_hidden_states": output_hidden_states,
573
+ "use_cache": use_cache,
574
+ "past_key_values": past_key_values,
575
+ "return_dict": return_dict,
576
+ "training": training,
577
+ }
578
+
579
+ # Add arguments to decoder from `kwargs_decoder`
580
+ decoder_inputs.update(kwargs_decoder)
581
+
582
+ decoder_outputs = self.decoder(**decoder_inputs)
583
+
584
+ logits = decoder_outputs[0]
585
+
586
+ # Compute loss independent from decoder (as some shift the logits inside them)
587
+ loss = None
588
+ if labels is not None:
589
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
590
+ loss = self.hf_compute_loss(labels, logits)
591
+
592
+ if not return_dict:
593
+ past_key_values = None
594
+ if use_cache:
595
+ past_key_values = decoder_outputs[1]
596
+ # The starting index of the remaining elements in `decoder_outputs`
597
+ start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
598
+
599
+ if not isinstance(encoder_outputs, tuple):
600
+ encoder_outputs = encoder_outputs.to_tuple()
601
+ output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
602
+ output = tuple([x for x in output if x is not None])
603
+ return output
604
+
605
+ return TFSeq2SeqLMOutput(
606
+ loss=loss,
607
+ logits=decoder_outputs.logits,
608
+ past_key_values=decoder_outputs.past_key_values,
609
+ decoder_hidden_states=decoder_outputs.hidden_states,
610
+ decoder_attentions=decoder_outputs.attentions,
611
+ cross_attentions=decoder_outputs.cross_attentions,
612
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
613
+ encoder_hidden_states=encoder_outputs.hidden_states,
614
+ encoder_attentions=encoder_outputs.attentions,
615
+ )
616
+
617
+ def prepare_inputs_for_generation(
618
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
619
+ ):
620
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
621
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
622
+ past_key_values = decoder_inputs.get("past_key_values")
623
+ if past_key_values is None:
624
+ past_key_values = decoder_inputs.get("past") # e.g. on TF GPT2
625
+ input_dict = {
626
+ "input_ids": None, # needs to be passed to make Keras.layer.__call__ happy
627
+ "attention_mask": attention_mask,
628
+ "decoder_attention_mask": decoder_attention_mask,
629
+ "decoder_input_ids": decoder_inputs["input_ids"],
630
+ # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
631
+ "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
632
+ "past_key_values": past_key_values,
633
+ "use_cache": use_cache,
634
+ }
635
+ return input_dict
636
+
637
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
638
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
639
+
640
+ def resize_token_embeddings(self, *args, **kwargs):
641
+ raise NotImplementedError(
642
+ "Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported.Please use the"
643
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
644
+ " model.decoder.resize_token_embeddings(...))"
645
+ )
646
+
647
+ def _reorder_cache(self, past, beam_idx):
648
+ # apply decoder cache reordering here
649
+ return self.decoder._reorder_cache(past, beam_idx)
650
+
651
+ def build(self, input_shape=None):
652
+ if self.built:
653
+ return
654
+ self.built = True
655
+ if getattr(self, "enc_to_dec_proj", None) is not None:
656
+ with tf.name_scope(self.enc_to_dec_proj.name):
657
+ self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size])
658
+ if getattr(self, "encoder", None) is not None:
659
+ with tf.name_scope(self.encoder.name):
660
+ self.encoder.build(None)
661
+ if getattr(self, "decoder", None) is not None:
662
+ with tf.name_scope(self.decoder.name):
663
+ self.decoder.build(None)
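As a quick, self-contained illustration of the `shift_tokens_right` helper above (the token ids below are dummy values): when `labels` are passed to `call` without `decoder_input_ids`, the decoder inputs are derived exactly like this, with `decoder_start_token_id` prepended, the labels shifted one position to the right, and `-100` (ignored) label positions replaced by `pad_token_id`.

```python
import tensorflow as tf

from transformers.models.encoder_decoder.modeling_tf_encoder_decoder import shift_tokens_right

# dummy label ids; -100 marks positions ignored by the loss
labels = tf.constant([[5, 6, -100, -100]])
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=101)
print(decoder_input_ids.numpy())  # [[101   5   6   0]]
```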
venv/lib/python3.10/site-packages/transformers/models/ibert/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_ibert"] = [
29
+ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
30
+ "IBertForMaskedLM",
31
+ "IBertForMultipleChoice",
32
+ "IBertForQuestionAnswering",
33
+ "IBertForSequenceClassification",
34
+ "IBertForTokenClassification",
35
+ "IBertModel",
36
+ "IBertPreTrainedModel",
37
+ ]
38
+
39
+ if TYPE_CHECKING:
40
+ from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
41
+
42
+ try:
43
+ if not is_torch_available():
44
+ raise OptionalDependencyNotAvailable()
45
+ except OptionalDependencyNotAvailable:
46
+ pass
47
+ else:
48
+ from .modeling_ibert import (
49
+ IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
50
+ IBertForMaskedLM,
51
+ IBertForMultipleChoice,
52
+ IBertForQuestionAnswering,
53
+ IBertForSequenceClassification,
54
+ IBertForTokenClassification,
55
+ IBertModel,
56
+ IBertPreTrainedModel,
57
+ )
58
+
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
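The `__init__.py` above uses the library's lazy-import pattern: `_import_structure` maps each submodule to the names it exports, and the `_LazyModule` installed into `sys.modules` only imports a submodule (and therefore torch) when one of those names is first accessed. A small sketch of how this behaves from user code, assuming torch is installed; the attribute names come from the `_import_structure` above, the rest is just inspection:

```python
import transformers.models.ibert as ibert

# the package object is the _LazyModule registered above, not a plain module
print(type(ibert).__name__)  # expected: _LazyModule

# accessing the config class imports only `configuration_ibert`
config = ibert.IBertConfig()
print(config.model_type)  # expected: ibert

# the first access to a modeling class is what triggers the torch-dependent import
print(ibert.IBertModel.__module__)  # expected: transformers.models.ibert.modeling_ibert
```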
venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.06 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/configuration_ibert.cpython-310.pyc ADDED
Binary file (6.32 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/modeling_ibert.cpython-310.pyc ADDED
Binary file (35.2 kB).