applied-ai-018 committed
Commit e2a1d53 · verified · 1 Parent(s): aacdcba

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py +138 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py +1567 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +1522 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py +240 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py +120 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__init__.py +64 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py +433 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb_fast.py +340 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__init__.py +81 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/configuration_plbart.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/convert_plbart_original_checkpoint_to_torch.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/modeling_plbart.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/tokenization_plbart.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/configuration_plbart.py +192 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py +94 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/modeling_plbart.py +1765 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/tokenization_plbart.py +425 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py +65 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +425 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__init__.py +62 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__pycache__/configuration_stablelm.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__pycache__/modeling_stablelm.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/configuration_stablelm.py +189 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/modeling_stablelm.py +1385 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__init__.py +65 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/configuration_visual_bert.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/modeling_visual_bert.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/configuration_visual_bert.py +135 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py +150 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/modeling_visual_bert.py +1590 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/vitmatte/__init__.py +72 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py +286 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2/__init__.py +134 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py ADDED
@@ -0,0 +1,138 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_tf_available,
+    is_tokenizers_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_blenderbot_small": [
+        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "BlenderbotSmallConfig",
+        "BlenderbotSmallOnnxConfig",
+    ],
+    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
+}
+
+try:
+    if not is_tokenizers_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_blenderbot_small"] = [
+        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "BlenderbotSmallForCausalLM",
+        "BlenderbotSmallForConditionalGeneration",
+        "BlenderbotSmallModel",
+        "BlenderbotSmallPreTrainedModel",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_blenderbot_small"] = [
+        "TFBlenderbotSmallForConditionalGeneration",
+        "TFBlenderbotSmallModel",
+        "TFBlenderbotSmallPreTrainedModel",
+    ]
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_blenderbot_small"] = [
+        "FlaxBlenderbotSmallForConditionalGeneration",
+        "FlaxBlenderbotSmallModel",
+        "FlaxBlenderbotSmallPreTrainedModel",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_blenderbot_small import (
+        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+        BlenderbotSmallConfig,
+        BlenderbotSmallOnnxConfig,
+    )
+    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
+
+    try:
+        if not is_tokenizers_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_blenderbot_small import (
+            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
+            BlenderbotSmallForCausalLM,
+            BlenderbotSmallForConditionalGeneration,
+            BlenderbotSmallModel,
+            BlenderbotSmallPreTrainedModel,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_blenderbot_small import (
+            TFBlenderbotSmallForConditionalGeneration,
+            TFBlenderbotSmallModel,
+            TFBlenderbotSmallPreTrainedModel,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_flax_blenderbot_small import (
+            FlaxBlenderbotSmallForConditionalGeneration,
+            FlaxBlenderbotSmallModel,
+            FlaxBlenderbotSmallPreTrainedModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
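
For orientation, the `_LazyModule` registration above defers the framework-specific imports until a symbol is first accessed, and simply omits a backend's classes when its optional dependency is missing. A minimal usage sketch, assuming PyTorch is installed; the checkpoint name is the one used in the generation example later in this diff:

```python
from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration

# Resolving these names triggers the lazy import structure defined in __init__.py above.
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="pt")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
```
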
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.06 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc ADDED
Binary file (12.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc ADDED
Binary file (49.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc ADDED
Binary file (8.27 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc ADDED
Binary file (3.68 kB).
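
The modeling file below provides the PyTorch implementation (learned positional embeddings, multi-head attention, encoder/decoder layers, and the pretrained base class). As a quick sketch of how those pieces fit together, the model can also be built from a `BlenderbotSmallConfig` alone, which initializes random weights without loading a checkpoint; default config values are assumed from the library:

```python
from transformers import BlenderbotSmallConfig, BlenderbotSmallModel

# Config-only instantiation: weights are randomly initialized, no checkpoint is downloaded.
config = BlenderbotSmallConfig()
model = BlenderbotSmallModel(config)

print(config.d_model, config.encoder_layers, config.decoder_layers)
print(sum(p.numel() for p in model.parameters()), "parameters")
```
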
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py ADDED
@@ -0,0 +1,1567 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BlenderbotSmall model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ Seq2SeqLMOutput,
34
+ Seq2SeqModelOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_end_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
50
+
51
+
52
+ from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
56
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
57
+ """
58
+ Shift input ids one token to the right.
59
+ """
60
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
61
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
62
+ shifted_input_ids[:, 0] = decoder_start_token_id
63
+
64
+ if pad_token_id is None:
65
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
66
+ # replace possible -100 values in labels by `pad_token_id`
67
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
68
+
69
+ return shifted_input_ids
70
+
71
+
72
+ # Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
73
+ class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
74
+ """
75
+ This module learns positional embeddings up to a fixed maximum size.
76
+ """
77
+
78
+ def __init__(self, num_embeddings: int, embedding_dim: int):
79
+ super().__init__(num_embeddings, embedding_dim)
80
+
81
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
82
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
83
+ bsz, seq_len = input_ids_shape[:2]
84
+ positions = torch.arange(
85
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
86
+ )
87
+ return super().forward(positions)
88
+
89
+
90
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
91
+ class BlenderbotSmallAttention(nn.Module):
92
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
93
+
94
+ def __init__(
95
+ self,
96
+ embed_dim: int,
97
+ num_heads: int,
98
+ dropout: float = 0.0,
99
+ is_decoder: bool = False,
100
+ bias: bool = True,
101
+ is_causal: bool = False,
102
+ config: Optional[BlenderbotSmallConfig] = None,
103
+ ):
104
+ super().__init__()
105
+ self.embed_dim = embed_dim
106
+ self.num_heads = num_heads
107
+ self.dropout = dropout
108
+ self.head_dim = embed_dim // num_heads
109
+ self.config = config
110
+
111
+ if (self.head_dim * num_heads) != self.embed_dim:
112
+ raise ValueError(
113
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
114
+ f" and `num_heads`: {num_heads})."
115
+ )
116
+ self.scaling = self.head_dim**-0.5
117
+ self.is_decoder = is_decoder
118
+ self.is_causal = is_causal
119
+
120
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
121
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
122
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
123
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
124
+
125
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
126
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
127
+
128
+ def forward(
129
+ self,
130
+ hidden_states: torch.Tensor,
131
+ key_value_states: Optional[torch.Tensor] = None,
132
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
133
+ attention_mask: Optional[torch.Tensor] = None,
134
+ layer_head_mask: Optional[torch.Tensor] = None,
135
+ output_attentions: bool = False,
136
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
137
+ """Input shape: Batch x Time x Channel"""
138
+
139
+ # if key_value_states are provided this layer is used as a cross-attention layer
140
+ # for the decoder
141
+ is_cross_attention = key_value_states is not None
142
+
143
+ bsz, tgt_len, _ = hidden_states.size()
144
+
145
+ # get query proj
146
+ query_states = self.q_proj(hidden_states) * self.scaling
147
+ # get key, value proj
148
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
149
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
150
+ # the provided `key_value_states` to support prefix tuning
151
+ if (
152
+ is_cross_attention
153
+ and past_key_value is not None
154
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
155
+ ):
156
+ # reuse k,v, cross_attentions
157
+ key_states = past_key_value[0]
158
+ value_states = past_key_value[1]
159
+ elif is_cross_attention:
160
+ # cross_attentions
161
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
162
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
163
+ elif past_key_value is not None:
164
+ # reuse k, v, self_attention
165
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
166
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
167
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
168
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
169
+ else:
170
+ # self_attention
171
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
172
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
173
+
174
+ if self.is_decoder:
175
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
176
+ # Further calls to cross_attention layer can then reuse all cross-attention
177
+ # key/value_states (first "if" case)
178
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
179
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
180
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
181
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
182
+ past_key_value = (key_states, value_states)
183
+
184
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
185
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
186
+ key_states = key_states.reshape(*proj_shape)
187
+ value_states = value_states.reshape(*proj_shape)
188
+
189
+ src_len = key_states.size(1)
190
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
191
+
192
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
193
+ raise ValueError(
194
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
195
+ f" {attn_weights.size()}"
196
+ )
197
+
198
+ if attention_mask is not None:
199
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
200
+ raise ValueError(
201
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
202
+ )
203
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
204
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
205
+
206
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
207
+
208
+ if layer_head_mask is not None:
209
+ if layer_head_mask.size() != (self.num_heads,):
210
+ raise ValueError(
211
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
212
+ f" {layer_head_mask.size()}"
213
+ )
214
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
215
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
216
+
217
+ if output_attentions:
218
+ # this operation is a bit awkward, but it's required to
219
+ # make sure that attn_weights keeps its gradient.
220
+ # In order to do so, attn_weights have to be reshaped
221
+ # twice and have to be reused in the following
222
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
223
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
224
+ else:
225
+ attn_weights_reshaped = None
226
+
227
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
228
+
229
+ attn_output = torch.bmm(attn_probs, value_states)
230
+
231
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
232
+ raise ValueError(
233
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
234
+ f" {attn_output.size()}"
235
+ )
236
+
237
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
238
+ attn_output = attn_output.transpose(1, 2)
239
+
240
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
241
+ # partitioned across GPUs when using tensor-parallelism.
242
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
243
+
244
+ attn_output = self.out_proj(attn_output)
245
+
246
+ return attn_output, attn_weights_reshaped, past_key_value
247
+
248
+
249
+ # Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
250
+ class BlenderbotSmallEncoderLayer(nn.Module):
251
+ def __init__(self, config: BlenderbotSmallConfig):
252
+ super().__init__()
253
+ self.embed_dim = config.d_model
254
+
255
+ self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
256
+ embed_dim=self.embed_dim,
257
+ num_heads=config.encoder_attention_heads,
258
+ dropout=config.attention_dropout,
259
+ config=config,
260
+ )
261
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
262
+ self.dropout = config.dropout
263
+ self.activation_fn = ACT2FN[config.activation_function]
264
+ self.activation_dropout = config.activation_dropout
265
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
266
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
267
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
268
+
269
+ def forward(
270
+ self,
271
+ hidden_states: torch.FloatTensor,
272
+ attention_mask: torch.FloatTensor,
273
+ layer_head_mask: torch.FloatTensor,
274
+ output_attentions: Optional[bool] = False,
275
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
276
+ """
277
+ Args:
278
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
279
+ attention_mask (`torch.FloatTensor`): attention mask of size
280
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
281
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
282
+ `(encoder_attention_heads,)`.
283
+ output_attentions (`bool`, *optional*):
284
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
285
+ returned tensors for more detail.
286
+ """
287
+ residual = hidden_states
288
+ hidden_states, attn_weights, _ = self.self_attn(
289
+ hidden_states=hidden_states,
290
+ attention_mask=attention_mask,
291
+ layer_head_mask=layer_head_mask,
292
+ output_attentions=output_attentions,
293
+ )
294
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
295
+ hidden_states = residual + hidden_states
296
+ hidden_states = self.self_attn_layer_norm(hidden_states)
297
+
298
+ residual = hidden_states
299
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
300
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
301
+ hidden_states = self.fc2(hidden_states)
302
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
303
+ hidden_states = residual + hidden_states
304
+ hidden_states = self.final_layer_norm(hidden_states)
305
+
306
+ if hidden_states.dtype == torch.float16 and (
307
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
308
+ ):
309
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
310
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
311
+
312
+ outputs = (hidden_states,)
313
+
314
+ if output_attentions:
315
+ outputs += (attn_weights,)
316
+
317
+ return outputs
318
+
319
+
320
+ # TODO: Implement attention with SDPA for TimeSeriesTransformer.
321
+ BLENDERBOT_SMALL_ATTENTION_CLASSES = {
322
+ "eager": BlenderbotSmallAttention,
323
+ }
324
+
325
+
326
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
327
+ class BlenderbotSmallDecoderLayer(nn.Module):
328
+ def __init__(self, config: BlenderbotSmallConfig):
329
+ super().__init__()
330
+ self.embed_dim = config.d_model
331
+
332
+ self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
333
+ embed_dim=self.embed_dim,
334
+ num_heads=config.decoder_attention_heads,
335
+ dropout=config.attention_dropout,
336
+ is_decoder=True,
337
+ is_causal=True,
338
+ config=config,
339
+ )
340
+ self.dropout = config.dropout
341
+ self.activation_fn = ACT2FN[config.activation_function]
342
+ self.activation_dropout = config.activation_dropout
343
+
344
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
345
+ self.encoder_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
346
+ self.embed_dim,
347
+ config.decoder_attention_heads,
348
+ dropout=config.attention_dropout,
349
+ is_decoder=True,
350
+ config=config,
351
+ )
352
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
353
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
354
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
355
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
356
+
357
+ def forward(
358
+ self,
359
+ hidden_states: torch.Tensor,
360
+ attention_mask: Optional[torch.Tensor] = None,
361
+ encoder_hidden_states: Optional[torch.Tensor] = None,
362
+ encoder_attention_mask: Optional[torch.Tensor] = None,
363
+ layer_head_mask: Optional[torch.Tensor] = None,
364
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
365
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
366
+ output_attentions: Optional[bool] = False,
367
+ use_cache: Optional[bool] = True,
368
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
369
+ """
370
+ Args:
371
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
372
+ attention_mask (`torch.FloatTensor`): attention mask of size
373
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
374
+ encoder_hidden_states (`torch.FloatTensor`):
375
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
376
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
377
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
378
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
379
+ `(encoder_attention_heads,)`.
380
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
381
+ size `(decoder_attention_heads,)`.
382
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
383
+ output_attentions (`bool`, *optional*):
384
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
385
+ returned tensors for more detail.
386
+ """
387
+ residual = hidden_states
388
+
389
+ # Self Attention
390
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
391
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
392
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
393
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
394
+ hidden_states=hidden_states,
395
+ past_key_value=self_attn_past_key_value,
396
+ attention_mask=attention_mask,
397
+ layer_head_mask=layer_head_mask,
398
+ output_attentions=output_attentions,
399
+ )
400
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
401
+ hidden_states = residual + hidden_states
402
+ hidden_states = self.self_attn_layer_norm(hidden_states)
403
+
404
+ # Cross-Attention Block
405
+ cross_attn_present_key_value = None
406
+ cross_attn_weights = None
407
+ if encoder_hidden_states is not None:
408
+ residual = hidden_states
409
+
410
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
411
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
412
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
413
+ hidden_states=hidden_states,
414
+ key_value_states=encoder_hidden_states,
415
+ attention_mask=encoder_attention_mask,
416
+ layer_head_mask=cross_attn_layer_head_mask,
417
+ past_key_value=cross_attn_past_key_value,
418
+ output_attentions=output_attentions,
419
+ )
420
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
421
+ hidden_states = residual + hidden_states
422
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
423
+
424
+ # add cross-attn to positions 3,4 of present_key_value tuple
425
+ present_key_value = present_key_value + cross_attn_present_key_value
426
+
427
+ # Fully Connected
428
+ residual = hidden_states
429
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
430
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
431
+ hidden_states = self.fc2(hidden_states)
432
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
433
+ hidden_states = residual + hidden_states
434
+ hidden_states = self.final_layer_norm(hidden_states)
435
+
436
+ outputs = (hidden_states,)
437
+
438
+ if output_attentions:
439
+ outputs += (self_attn_weights, cross_attn_weights)
440
+
441
+ if use_cache:
442
+ outputs += (present_key_value,)
443
+
444
+ return outputs
445
+
446
+
447
+ class BlenderbotSmallPreTrainedModel(PreTrainedModel):
448
+ config_class = BlenderbotSmallConfig
449
+ base_model_prefix = "model"
450
+ supports_gradient_checkpointing = True
451
+
452
+ def _init_weights(self, module):
453
+ std = self.config.init_std
454
+ if isinstance(module, nn.Linear):
455
+ module.weight.data.normal_(mean=0.0, std=std)
456
+ if module.bias is not None:
457
+ module.bias.data.zero_()
458
+ elif isinstance(module, nn.Embedding):
459
+ module.weight.data.normal_(mean=0.0, std=std)
460
+ if module.padding_idx is not None:
461
+ module.weight.data[module.padding_idx].zero_()
462
+
463
+ @property
464
+ def dummy_inputs(self):
465
+ pad_token = self.config.pad_token_id
466
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
467
+ dummy_inputs = {
468
+ "attention_mask": input_ids.ne(pad_token),
469
+ "input_ids": input_ids,
470
+ "decoder_input_ids": input_ids,
471
+ }
472
+ return dummy_inputs
473
+
474
+
475
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
476
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
477
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
478
+ etc.)
479
+
480
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
481
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
482
+ and behavior.
483
+
484
+ Parameters:
485
+ config ([`BlenderbotSmallConfig`]):
486
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
487
+ load the weights associated with the model, only the configuration. Check out the
488
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
489
+ """
490
+
491
+ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
492
+ Conversation example:
493
+
494
+ ```python
495
+ >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
496
+
497
+ >>> mname = "facebook/blenderbot_small-90M"
498
+ >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
499
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
500
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
501
+ >>> print("Human: ", UTTERANCE)
502
+ Human: My friends are cool but they eat too many carbs.
503
+
504
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
505
+ >>> reply_ids = model.generate(**inputs)
506
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
507
+ Bot: what kind of carbs do they eat? i don't know much about carbs.
508
+
509
+ >>> REPLY = "I'm not sure"
510
+ >>> print("Human: ", REPLY)
511
+ Human: I'm not sure
512
+
513
+ >>> NEXT_UTTERANCE = (
514
+ ... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? "
515
+ ... "i don't know much about carbs__end__ "
516
+ ... "__start__ I'm not sure."
517
+ ... )
518
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
519
+ >>> next_reply_ids = model.generate(**inputs)
520
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
521
+ Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats.
522
+ ```
523
+ """
524
+
525
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
526
+ Args:
527
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
528
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
529
+ it.
530
+
531
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
532
+ [`PreTrainedTokenizer.__call__`] for details.
533
+
534
+ [What are input IDs?](../glossary#input-ids)
535
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
536
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
537
+
538
+ - 1 for tokens that are **not masked**,
539
+ - 0 for tokens that are **masked**.
540
+
541
+ [What are attention masks?](../glossary#attention-mask)
542
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
543
+ Indices of decoder input sequence tokens in the vocabulary.
544
+
545
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
546
+ [`PreTrainedTokenizer.__call__`] for details.
547
+
548
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
549
+
550
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
551
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
552
+ `past_key_values`).
553
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
554
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
555
+ be used by default.
556
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
557
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
558
+
559
+ - 1 indicates the head is **not masked**,
560
+ - 0 indicates the head is **masked**.
561
+
562
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
563
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
564
+
565
+ - 1 indicates the head is **not masked**,
566
+ - 0 indicates the head is **masked**.
567
+
568
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
569
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
570
+ 1]`:
571
+
572
+ - 1 indicates the head is **not masked**,
573
+ - 0 indicates the head is **masked**.
574
+
575
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
576
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
577
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
578
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
579
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
580
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
581
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
582
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
583
+
584
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
585
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
586
+
587
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
588
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
589
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
590
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
591
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
592
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
593
+ than the model's internal embedding lookup matrix.
594
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
595
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
596
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
597
+ input (see `past_key_values`). This is useful if you want more control over how to convert
598
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
599
+
600
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
601
+ of `inputs_embeds`.
602
+ use_cache (`bool`, *optional*):
603
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
604
+ `past_key_values`).
605
+ output_attentions (`bool`, *optional*):
606
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
607
+ tensors for more detail.
608
+ output_hidden_states (`bool`, *optional*):
609
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
610
+ more detail.
611
+ return_dict (`bool`, *optional*):
612
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
613
+ """
614
+
615
+
616
+ class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel):
617
+ """
618
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
619
+ [`BlenderbotSmallEncoderLayer`].
620
+
621
+ Args:
622
+ config: BlenderbotSmallConfig
623
+ embed_tokens (nn.Embedding): output embedding
624
+ """
625
+
626
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
627
+ super().__init__(config)
628
+
629
+ self.dropout = config.dropout
630
+ self.layerdrop = config.encoder_layerdrop
631
+
632
+ embed_dim = config.d_model
633
+ self.padding_idx = config.pad_token_id
634
+ self.max_source_positions = config.max_position_embeddings
635
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
636
+
637
+ if embed_tokens is not None:
638
+ self.embed_tokens = embed_tokens
639
+ else:
640
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
641
+
642
+ self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
643
+ config.max_position_embeddings,
644
+ embed_dim,
645
+ )
646
+ self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
647
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
648
+
649
+ self.gradient_checkpointing = False
650
+ # Initialize weights and apply final processing
651
+ self.post_init()
652
+
653
+ def forward(
654
+ self,
655
+ input_ids=None,
656
+ attention_mask=None,
657
+ head_mask=None,
658
+ inputs_embeds=None,
659
+ output_attentions=None,
660
+ output_hidden_states=None,
661
+ return_dict=None,
662
+ ):
663
+ r"""
664
+ Args:
665
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
666
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
667
+ provide it.
668
+
669
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
670
+ [`PreTrainedTokenizer.__call__`] for details.
671
+
672
+ [What are input IDs?](../glossary#input-ids)
673
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
674
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
675
+
676
+ - 1 for tokens that are **not masked**,
677
+ - 0 for tokens that are **masked**.
678
+
679
+ [What are attention masks?](../glossary#attention-mask)
680
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
681
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
682
+
683
+ - 1 indicates the head is **not masked**,
684
+ - 0 indicates the head is **masked**.
685
+
686
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
687
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
688
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
689
+ than the model's internal embedding lookup matrix.
690
+ output_attentions (`bool`, *optional*):
691
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
692
+ returned tensors for more detail.
693
+ output_hidden_states (`bool`, *optional*):
694
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
695
+ for more detail.
696
+ return_dict (`bool`, *optional*):
697
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
698
+ """
699
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
700
+ output_hidden_states = (
701
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
702
+ )
703
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
704
+
705
+ # retrieve input_ids and inputs_embeds
706
+ if input_ids is not None and inputs_embeds is not None:
707
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
708
+ elif input_ids is not None:
709
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
710
+ input_shape = input_ids.size()
711
+ input_ids = input_ids.view(-1, input_shape[-1])
712
+ elif inputs_embeds is not None:
713
+ input_shape = inputs_embeds.size()[:-1]
714
+ else:
715
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
716
+
717
+ if inputs_embeds is None:
718
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
719
+
720
+ embed_pos = self.embed_positions(input_shape)
721
+
722
+ hidden_states = inputs_embeds + embed_pos
723
+ hidden_states = self.layernorm_embedding(hidden_states)
724
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
725
+
726
+ # expand attention_mask
727
+ if attention_mask is not None:
728
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
729
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
730
+
731
+ encoder_states = () if output_hidden_states else None
732
+ all_attentions = () if output_attentions else None
733
+
734
+ # check if head_mask has a correct number of layers specified if desired
735
+ if head_mask is not None:
736
+ if head_mask.size()[0] != len(self.layers):
737
+ raise ValueError(
738
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
739
+ f" {head_mask.size()[0]}."
740
+ )
741
+ for idx, encoder_layer in enumerate(self.layers):
742
+ if output_hidden_states:
743
+ encoder_states = encoder_states + (hidden_states,)
744
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
745
+ to_drop = False
746
+ if self.training:
747
+ dropout_probability = torch.rand([])
748
+ if dropout_probability < self.layerdrop: # skip the layer
749
+ to_drop = True
750
+
751
+ if to_drop:
752
+ layer_outputs = (None, None)
753
+ else:
754
+ if self.gradient_checkpointing and self.training:
755
+ layer_outputs = self._gradient_checkpointing_func(
756
+ encoder_layer.__call__,
757
+ hidden_states,
758
+ attention_mask,
759
+ (head_mask[idx] if head_mask is not None else None),
760
+ output_attentions,
761
+ )
762
+ else:
763
+ layer_outputs = encoder_layer(
764
+ hidden_states,
765
+ attention_mask,
766
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
767
+ output_attentions=output_attentions,
768
+ )
769
+
770
+ hidden_states = layer_outputs[0]
771
+
772
+ if output_attentions:
773
+ all_attentions = all_attentions + (layer_outputs[1],)
774
+
775
+ if output_hidden_states:
776
+ encoder_states = encoder_states + (hidden_states,)
777
+
778
+ if not return_dict:
779
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
780
+ return BaseModelOutput(
781
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
782
+ )
783
+
784
+
785
+ class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
786
+ """
787
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`]
788
+
789
+ Args:
790
+ config: BlenderbotSmallConfig
791
+ embed_tokens (nn.Embedding): output embedding
792
+ """
793
+
794
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
795
+ super().__init__(config)
796
+ self.dropout = config.dropout
797
+ self.layerdrop = config.decoder_layerdrop
798
+ self.padding_idx = config.pad_token_id
799
+ self.max_target_positions = config.max_position_embeddings
800
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
801
+
802
+ if embed_tokens is not None:
803
+ self.embed_tokens = embed_tokens
804
+ else:
805
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
806
+
807
+ self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
808
+ config.max_position_embeddings,
809
+ config.d_model,
810
+ )
811
+ self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)])
812
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
813
+
814
+ self.gradient_checkpointing = False
815
+ # Initialize weights and apply final processing
816
+ self.post_init()
817
+
818
+ def get_input_embeddings(self):
819
+ return self.embed_tokens
820
+
821
+ def set_input_embeddings(self, value):
822
+ self.embed_tokens = value
823
+
824
+ def forward(
825
+ self,
826
+ input_ids=None,
827
+ attention_mask=None,
828
+ encoder_hidden_states=None,
829
+ encoder_attention_mask=None,
830
+ head_mask=None,
831
+ cross_attn_head_mask=None,
832
+ past_key_values=None,
833
+ inputs_embeds=None,
834
+ use_cache=None,
835
+ output_attentions=None,
836
+ output_hidden_states=None,
837
+ return_dict=None,
838
+ ):
839
+ r"""
840
+ Args:
841
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
842
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
843
+ provide it.
844
+
845
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
846
+ [`PreTrainedTokenizer.__call__`] for details.
847
+
848
+ [What are input IDs?](../glossary#input-ids)
849
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
850
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
851
+
852
+ - 1 for tokens that are **not masked**,
853
+ - 0 for tokens that are **masked**.
854
+
855
+ [What are attention masks?](../glossary#attention-mask)
856
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
857
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
858
+ of the decoder.
859
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
860
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
861
+ selected in `[0, 1]`:
862
+
863
+ - 1 for tokens that are **not masked**,
864
+ - 0 for tokens that are **masked**.
865
+
866
+ [What are attention masks?](../glossary#attention-mask)
867
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
868
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
869
+
870
+ - 1 indicates the head is **not masked**,
871
+ - 0 indicates the head is **masked**.
872
+
873
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
874
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
875
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
876
+
877
+ - 1 indicates the head is **not masked**,
878
+ - 0 indicates the head is **masked**.
879
+
880
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
881
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
882
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
883
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
884
+
885
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
886
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
887
+
888
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
889
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
890
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
891
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
892
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
893
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
894
+ than the model's internal embedding lookup matrix.
895
+ output_attentions (`bool`, *optional*):
896
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
897
+ returned tensors for more detail.
898
+ output_hidden_states (`bool`, *optional*):
899
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
900
+ for more detail.
901
+ return_dict (`bool`, *optional*):
902
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
903
+ """
904
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
905
+ output_hidden_states = (
906
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
907
+ )
908
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
909
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
910
+
911
+ # retrieve input_ids and inputs_embeds
912
+ if input_ids is not None and inputs_embeds is not None:
913
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
914
+ elif input_ids is not None:
915
+ input_shape = input_ids.size()
916
+ input_ids = input_ids.view(-1, input_shape[-1])
917
+ elif inputs_embeds is not None:
918
+ input_shape = inputs_embeds.size()[:-1]
919
+ else:
920
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
921
+
922
+ # past_key_values_length
923
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
924
+
925
+ if inputs_embeds is None:
926
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
927
+
928
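+ # build the 4D causal mask (lower-triangular over the target length, offset by the cached length) and fold in the provided padding mask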
+ attention_mask = _prepare_4d_causal_attention_mask(
929
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
930
+ )
931
+
932
+ # expand encoder attention mask
933
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
934
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
935
+ encoder_attention_mask = _prepare_4d_attention_mask(
936
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
937
+ )
938
+
939
+ # embed positions
940
+ positions = self.embed_positions(input_shape, past_key_values_length)
941
+
942
+ # BlenderbotSmall applies layer norm on hidden_states
943
+ inputs_embeds = self.layernorm_embedding(inputs_embeds)
944
+ hidden_states = inputs_embeds + positions
945
+
946
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
947
+
948
+ if self.gradient_checkpointing and self.training:
949
+ if use_cache:
950
+ logger.warning_once(
951
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
952
+ )
953
+ use_cache = False
954
+
955
+ # decoder layers
956
+ all_hidden_states = () if output_hidden_states else None
957
+ all_self_attns = () if output_attentions else None
958
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
959
+ next_decoder_cache = () if use_cache else None
960
+
961
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
962
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
963
+ if attn_mask is not None:
964
+ if attn_mask.size()[0] != len(self.layers):
965
+ raise ValueError(
966
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
967
+ f" {head_mask.size()[0]}."
968
+ )
969
+ for idx, decoder_layer in enumerate(self.layers):
970
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
971
+ if output_hidden_states:
972
+ all_hidden_states += (hidden_states,)
973
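+ # LayerDrop: during training, skip this entire decoder layer with probability `self.layerdrop`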
+ if self.training:
974
+ dropout_probability = torch.rand([])
975
+ if dropout_probability < self.layerdrop:
976
+ continue
977
+
978
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
979
+
980
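+ # gradient checkpointing recomputes this layer's activations in the backward pass instead of storing them, trading compute for memory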
+ if self.gradient_checkpointing and self.training:
981
+ layer_outputs = self._gradient_checkpointing_func(
982
+ decoder_layer.__call__,
983
+ hidden_states,
984
+ attention_mask,
985
+ encoder_hidden_states,
986
+ encoder_attention_mask,
987
+ head_mask[idx] if head_mask is not None else None,
988
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
989
+ None,
990
+ output_attentions,
991
+ use_cache,
992
+ )
993
+ else:
994
+ layer_outputs = decoder_layer(
995
+ hidden_states,
996
+ attention_mask=attention_mask,
997
+ encoder_hidden_states=encoder_hidden_states,
998
+ encoder_attention_mask=encoder_attention_mask,
999
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1000
+ cross_attn_layer_head_mask=(
1001
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1002
+ ),
1003
+ past_key_value=past_key_value,
1004
+ output_attentions=output_attentions,
1005
+ use_cache=use_cache,
1006
+ )
1007
+ hidden_states = layer_outputs[0]
1008
+
1009
+ if use_cache:
1010
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1011
+
1012
+ if output_attentions:
1013
+ all_self_attns += (layer_outputs[1],)
1014
+
1015
+ if encoder_hidden_states is not None:
1016
+ all_cross_attentions += (layer_outputs[2],)
1017
+
1018
+ # add hidden states from the last decoder layer
1019
+ if output_hidden_states:
1020
+ all_hidden_states += (hidden_states,)
1021
+
1022
+ next_cache = next_decoder_cache if use_cache else None
1023
+ if not return_dict:
1024
+ return tuple(
1025
+ v
1026
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1027
+ if v is not None
1028
+ )
1029
+ return BaseModelOutputWithPastAndCrossAttentions(
1030
+ last_hidden_state=hidden_states,
1031
+ past_key_values=next_cache,
1032
+ hidden_states=all_hidden_states,
1033
+ attentions=all_self_attns,
1034
+ cross_attentions=all_cross_attentions,
1035
+ )
1036
+
1037
+
1038
+ @add_start_docstrings(
1039
+ "The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.",
1040
+ BLENDERBOT_SMALL_START_DOCSTRING,
1041
+ )
1042
+ class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
1043
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]
1044
+
1045
+ def __init__(self, config: BlenderbotSmallConfig):
1046
+ super().__init__(config)
1047
+
1048
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1049
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1050
+
1051
+ self.encoder = BlenderbotSmallEncoder(config, self.shared)
1052
+ self.decoder = BlenderbotSmallDecoder(config, self.shared)
1053
+
1054
+ # Initialize weights and apply final processing
1055
+ self.post_init()
1056
+
1057
+ def get_input_embeddings(self):
1058
+ return self.shared
1059
+
1060
+ def set_input_embeddings(self, value):
1061
+ self.shared = value
1062
+ self.encoder.embed_tokens = self.shared
1063
+ self.decoder.embed_tokens = self.shared
1064
+
1065
+ def get_encoder(self):
1066
+ return self.encoder
1067
+
1068
+ def get_decoder(self):
1069
+ return self.decoder
1070
+
1071
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1072
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1073
+ def forward(
1074
+ self,
1075
+ input_ids: Optional[torch.LongTensor] = None,
1076
+ attention_mask: Optional[torch.Tensor] = None,
1077
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1078
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1079
+ head_mask: Optional[torch.Tensor] = None,
1080
+ decoder_head_mask: Optional[torch.Tensor] = None,
1081
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1082
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
1083
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1084
+ inputs_embeds: Optional[torch.Tensor] = None,
1085
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1086
+ use_cache: Optional[bool] = None,
1087
+ output_attentions: Optional[bool] = None,
1088
+ output_hidden_states: Optional[bool] = None,
1089
+ return_dict: Optional[bool] = None,
1090
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
1091
+ r"""
1092
+ Returns:
1093
+
1094
+ Example:
1095
+
1096
+ ```python
1097
+ >>> from transformers import AutoTokenizer, BlenderbotSmallModel
1098
+
1099
+ >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
1100
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1101
+
1102
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
1103
+ >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") # Batch size 1
1104
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
1105
+
1106
+ >>> last_hidden_states = outputs.last_hidden_state
1107
+ >>> list(last_hidden_states.shape)
1108
+ [1, 3, 512]
1109
+ ```"""
1110
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1111
+ output_hidden_states = (
1112
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1113
+ )
1114
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1115
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1116
+
1117
+ if encoder_outputs is None:
1118
+ encoder_outputs = self.encoder(
1119
+ input_ids=input_ids,
1120
+ attention_mask=attention_mask,
1121
+ head_mask=head_mask,
1122
+ inputs_embeds=inputs_embeds,
1123
+ output_attentions=output_attentions,
1124
+ output_hidden_states=output_hidden_states,
1125
+ return_dict=return_dict,
1126
+ )
1127
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1128
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1129
+ encoder_outputs = BaseModelOutput(
1130
+ last_hidden_state=encoder_outputs[0],
1131
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1132
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1133
+ )
1134
+
1135
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1136
+ decoder_outputs = self.decoder(
1137
+ input_ids=decoder_input_ids,
1138
+ attention_mask=decoder_attention_mask,
1139
+ encoder_hidden_states=encoder_outputs[0],
1140
+ encoder_attention_mask=attention_mask,
1141
+ head_mask=decoder_head_mask,
1142
+ cross_attn_head_mask=cross_attn_head_mask,
1143
+ past_key_values=past_key_values,
1144
+ inputs_embeds=decoder_inputs_embeds,
1145
+ use_cache=use_cache,
1146
+ output_attentions=output_attentions,
1147
+ output_hidden_states=output_hidden_states,
1148
+ return_dict=return_dict,
1149
+ )
1150
+
1151
+ if not return_dict:
1152
+ return decoder_outputs + encoder_outputs
1153
+
1154
+ return Seq2SeqModelOutput(
1155
+ last_hidden_state=decoder_outputs.last_hidden_state,
1156
+ past_key_values=decoder_outputs.past_key_values,
1157
+ decoder_hidden_states=decoder_outputs.hidden_states,
1158
+ decoder_attentions=decoder_outputs.attentions,
1159
+ cross_attentions=decoder_outputs.cross_attentions,
1160
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1161
+ encoder_hidden_states=encoder_outputs.hidden_states,
1162
+ encoder_attentions=encoder_outputs.attentions,
1163
+ )
1164
+
1165
+
1166
+ @add_start_docstrings(
1167
+ "The BlenderbotSmall Model with a language modeling head. Can be used for summarization.",
1168
+ BLENDERBOT_SMALL_START_DOCSTRING,
1169
+ )
1170
+ class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
1171
+ base_model_prefix = "model"
1172
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
1173
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]
1174
+
1175
+ def __init__(self, config: BlenderbotSmallConfig):
1176
+ super().__init__(config)
1177
+ self.model = BlenderbotSmallModel(config)
1178
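+ # non-trainable bias added to the LM logits; registered as a buffer so it is saved/loaded with the model but never updated by the optimizer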
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
1179
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1180
+
1181
+ # Initialize weights and apply final processing
1182
+ self.post_init()
1183
+
1184
+ def get_encoder(self):
1185
+ return self.model.get_encoder()
1186
+
1187
+ def get_decoder(self):
1188
+ return self.model.get_decoder()
1189
+
1190
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1191
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1192
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1193
+ return new_embeddings
1194
+
1195
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1196
+ old_num_tokens = self.final_logits_bias.shape[-1]
1197
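+ # truncate the bias when the vocabulary shrinks, otherwise pad the new entries with zeros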
+ if new_num_tokens <= old_num_tokens:
1198
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1199
+ else:
1200
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1201
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1202
+ self.register_buffer("final_logits_bias", new_bias)
1203
+
1204
+ def get_output_embeddings(self):
1205
+ return self.lm_head
1206
+
1207
+ def set_output_embeddings(self, new_embeddings):
1208
+ self.lm_head = new_embeddings
1209
+
1210
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1211
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1212
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
1213
+ def forward(
1214
+ self,
1215
+ input_ids: Optional[torch.LongTensor] = None,
1216
+ attention_mask: Optional[torch.Tensor] = None,
1217
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1218
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1219
+ head_mask: Optional[torch.Tensor] = None,
1220
+ decoder_head_mask: Optional[torch.Tensor] = None,
1221
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1222
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
1223
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1224
+ inputs_embeds: Optional[torch.Tensor] = None,
1225
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1226
+ labels: Optional[torch.LongTensor] = None,
1227
+ use_cache: Optional[bool] = None,
1228
+ output_attentions: Optional[bool] = None,
1229
+ output_hidden_states: Optional[bool] = None,
1230
+ return_dict: Optional[bool] = None,
1231
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
1232
+ r"""
1233
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1234
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1235
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1236
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1237
+
1238
+ Returns:
1239
+ """
1240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1241
+
1242
+ if labels is not None:
1243
+ if use_cache:
1244
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1245
+ use_cache = False
1246
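+ # if no decoder inputs are given, derive them by shifting the labels one position to the right (teacher forcing)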
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1247
+ decoder_input_ids = shift_tokens_right(
1248
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1249
+ )
1250
+
1251
+ outputs = self.model(
1252
+ input_ids,
1253
+ attention_mask=attention_mask,
1254
+ decoder_input_ids=decoder_input_ids,
1255
+ encoder_outputs=encoder_outputs,
1256
+ decoder_attention_mask=decoder_attention_mask,
1257
+ head_mask=head_mask,
1258
+ decoder_head_mask=decoder_head_mask,
1259
+ cross_attn_head_mask=cross_attn_head_mask,
1260
+ past_key_values=past_key_values,
1261
+ inputs_embeds=inputs_embeds,
1262
+ decoder_inputs_embeds=decoder_inputs_embeds,
1263
+ use_cache=use_cache,
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ )
1268
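+ # project decoder hidden states onto the vocabulary and add the final_logits_bias buffer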
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
1269
+
1270
+ masked_lm_loss = None
1271
+ if labels is not None:
1272
+ loss_fct = CrossEntropyLoss()
1273
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1274
+
1275
+ if not return_dict:
1276
+ output = (lm_logits,) + outputs[1:]
1277
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1278
+
1279
+ return Seq2SeqLMOutput(
1280
+ loss=masked_lm_loss,
1281
+ logits=lm_logits,
1282
+ past_key_values=outputs.past_key_values,
1283
+ decoder_hidden_states=outputs.decoder_hidden_states,
1284
+ decoder_attentions=outputs.decoder_attentions,
1285
+ cross_attentions=outputs.cross_attentions,
1286
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1287
+ encoder_hidden_states=outputs.encoder_hidden_states,
1288
+ encoder_attentions=outputs.encoder_attentions,
1289
+ )
1290
+
1291
+ def prepare_inputs_for_generation(
1292
+ self,
1293
+ decoder_input_ids,
1294
+ past_key_values=None,
1295
+ attention_mask=None,
1296
+ head_mask=None,
1297
+ decoder_head_mask=None,
1298
+ cross_attn_head_mask=None,
1299
+ use_cache=None,
1300
+ encoder_outputs=None,
1301
+ **kwargs,
1302
+ ):
1303
+ # cut decoder_input_ids if past is used
1304
+ if past_key_values is not None:
1305
+ past_length = past_key_values[0][0].shape[2]
1306
+
1307
+ # Some generation methods already pass only the last input ID
1308
+ if decoder_input_ids.shape[1] > past_length:
1309
+ remove_prefix_length = past_length
1310
+ else:
1311
+ # Default to old behavior: keep only final ID
1312
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1313
+
1314
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1315
+
1316
+ return {
1317
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1318
+ "encoder_outputs": encoder_outputs,
1319
+ "past_key_values": past_key_values,
1320
+ "decoder_input_ids": decoder_input_ids,
1321
+ "attention_mask": attention_mask,
1322
+ "head_mask": head_mask,
1323
+ "decoder_head_mask": decoder_head_mask,
1324
+ "cross_attn_head_mask": cross_attn_head_mask,
1325
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1326
+ }
1327
+
1328
+ @staticmethod
1329
+ def _reorder_cache(past_key_values, beam_idx):
1330
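+ # reorder the cached self-attention key/value states along the batch dimension so they follow the beams selected by beam search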
+ reordered_past = ()
1331
+ for layer_past in past_key_values:
1332
+ # cached cross_attention states don't have to be reordered -> they are always the same
1333
+ reordered_past += (
1334
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1335
+ + layer_past[2:],
1336
+ )
1337
+ return reordered_past
1338
+
1339
+
1340
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->BlenderbotSmall
1341
+ class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel):
1342
+ """
1343
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1344
+ used in combination with the [`EncoderDecoderModel`] framework.
1345
+ """
1346
+
1347
+ def __init__(self, config):
1348
+ super().__init__(config)
1349
+ self.decoder = BlenderbotSmallDecoder(config)
1350
+
1351
+ def forward(self, *args, **kwargs):
1352
+ return self.decoder(*args, **kwargs)
1353
+
1354
+
1355
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M
1356
+ class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel):
1357
+ _tied_weights_keys = ["lm_head.weight"]
1358
+
1359
+ def __init__(self, config):
1360
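+ # work on a copy of the config so the caller's instance is not mutated when forcing decoder-only settings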
+ config = copy.deepcopy(config)
1361
+ config.is_decoder = True
1362
+ config.is_encoder_decoder = False
1363
+ super().__init__(config)
1364
+ self.model = BlenderbotSmallDecoderWrapper(config)
1365
+
1366
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1367
+
1368
+ # Initialize weights and apply final processing
1369
+ self.post_init()
1370
+
1371
+ def get_input_embeddings(self):
1372
+ return self.model.decoder.embed_tokens
1373
+
1374
+ def set_input_embeddings(self, value):
1375
+ self.model.decoder.embed_tokens = value
1376
+
1377
+ def get_output_embeddings(self):
1378
+ return self.lm_head
1379
+
1380
+ def set_output_embeddings(self, new_embeddings):
1381
+ self.lm_head = new_embeddings
1382
+
1383
+ def set_decoder(self, decoder):
1384
+ self.model.decoder = decoder
1385
+
1386
+ def get_decoder(self):
1387
+ return self.model.decoder
1388
+
1389
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1390
+ def forward(
1391
+ self,
1392
+ input_ids: torch.LongTensor = None,
1393
+ attention_mask: Optional[torch.Tensor] = None,
1394
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1395
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1396
+ head_mask: Optional[torch.Tensor] = None,
1397
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1398
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1399
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1400
+ labels: Optional[torch.LongTensor] = None,
1401
+ use_cache: Optional[bool] = None,
1402
+ output_attentions: Optional[bool] = None,
1403
+ output_hidden_states: Optional[bool] = None,
1404
+ return_dict: Optional[bool] = None,
1405
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1406
+ r"""
1407
+ Args:
1408
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1409
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1410
+ provide it.
1411
+
1412
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1413
+ [`PreTrainedTokenizer.__call__`] for details.
1414
+
1415
+ [What are input IDs?](../glossary#input-ids)
1416
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1417
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1418
+
1419
+ - 1 for tokens that are **not masked**,
1420
+ - 0 for tokens that are **masked**.
1421
+
1422
+ [What are attention masks?](../glossary#attention-mask)
1423
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1424
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1425
+ if the model is configured as a decoder.
1426
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1427
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
1428
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
1429
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1430
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1431
+
1432
+ - 1 indicates the head is **not masked**,
1433
+ - 0 indicates the head is **masked**.
1434
+
1435
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1436
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1437
+
1438
+ - 1 indicates the head is **not masked**,
1439
+ - 0 indicates the head is **masked**.
1440
+
1441
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1442
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1443
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
1444
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
1445
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
1446
+
1447
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1448
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1449
+
1450
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1451
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1452
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1453
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1454
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1455
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1456
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1457
+ use_cache (`bool`, *optional*):
1458
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1459
+ (see `past_key_values`).
1460
+ output_attentions (`bool`, *optional*):
1464
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1465
+ returned tensors for more detail.
1466
+ output_hidden_states (`bool`, *optional*):
1467
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1468
+ for more detail.
1469
+ return_dict (`bool`, *optional*):
1470
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1471
+
1472
+ Returns:
1473
+
1474
+ Example:
1475
+
1476
+ ```python
1477
+ >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM
1478
+
1479
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1480
+ >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False)
1481
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
1482
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1483
+ >>> outputs = model(**inputs)
1484
+
1485
+ >>> logits = outputs.logits
1486
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
1487
+ >>> list(logits.shape) == expected_shape
1488
+ True
1489
+ ```"""
1490
+
1491
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1492
+ output_hidden_states = (
1493
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1494
+ )
1495
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1496
+
1497
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1498
+ outputs = self.model.decoder(
1499
+ input_ids=input_ids,
1500
+ attention_mask=attention_mask,
1501
+ encoder_hidden_states=encoder_hidden_states,
1502
+ encoder_attention_mask=encoder_attention_mask,
1503
+ head_mask=head_mask,
1504
+ cross_attn_head_mask=cross_attn_head_mask,
1505
+ past_key_values=past_key_values,
1506
+ inputs_embeds=inputs_embeds,
1507
+ use_cache=use_cache,
1508
+ output_attentions=output_attentions,
1509
+ output_hidden_states=output_hidden_states,
1510
+ return_dict=return_dict,
1511
+ )
1512
+
1513
+ logits = self.lm_head(outputs[0])
1514
+
1515
+ loss = None
1516
+ if labels is not None:
1517
+ labels = labels.to(logits.device)
1518
+ loss_fct = CrossEntropyLoss()
1519
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
1520
+
1521
+ if not return_dict:
1522
+ output = (logits,) + outputs[1:]
1523
+ return (loss,) + output if loss is not None else output
1524
+
1525
+ return CausalLMOutputWithCrossAttentions(
1526
+ loss=loss,
1527
+ logits=logits,
1528
+ past_key_values=outputs.past_key_values,
1529
+ hidden_states=outputs.hidden_states,
1530
+ attentions=outputs.attentions,
1531
+ cross_attentions=outputs.cross_attentions,
1532
+ )
1533
+
1534
+ def prepare_inputs_for_generation(
1535
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1536
+ ):
1537
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1538
+ if attention_mask is None:
1539
+ attention_mask = input_ids.new_ones(input_ids.shape)
1540
+
1541
+ if past_key_values:
1542
+ past_length = past_key_values[0][0].shape[2]
1543
+
1544
+ # Some generation methods already pass only the last input ID
1545
+ if input_ids.shape[1] > past_length:
1546
+ remove_prefix_length = past_length
1547
+ else:
1548
+ # Default to old behavior: keep only final ID
1549
+ remove_prefix_length = input_ids.shape[1] - 1
1550
+
1551
+ input_ids = input_ids[:, remove_prefix_length:]
1552
+ # first step, decoder_cached_states are empty
1553
+ return {
1554
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
1555
+ "attention_mask": attention_mask,
1556
+ "past_key_values": past_key_values,
1557
+ "use_cache": use_cache,
1558
+ }
1559
+
1560
+ @staticmethod
1561
+ def _reorder_cache(past_key_values, beam_idx):
1562
+ reordered_past = ()
1563
+ for layer_past in past_key_values:
1564
+ reordered_past += (
1565
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1566
+ )
1567
+ return reordered_past
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py ADDED
@@ -0,0 +1,1522 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax BlenderbotSmall model."""
16
+
17
+
18
+ import math
19
+ import random
20
+ from functools import partial
21
+ from typing import Callable, Optional, Tuple
22
+
23
+ import flax.linen as nn
24
+ import jax
25
+ import jax.numpy as jnp
26
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
27
+ from flax.linen import combine_masks, make_causal_mask
28
+ from flax.linen.attention import dot_product_attention_weights
29
+ from flax.traverse_util import flatten_dict, unflatten_dict
30
+ from jax import lax
31
+ from jax.random import PRNGKey
32
+
33
+ from ...modeling_flax_outputs import (
34
+ FlaxBaseModelOutput,
35
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
36
+ FlaxCausalLMOutputWithCrossAttentions,
37
+ FlaxSeq2SeqLMOutput,
38
+ FlaxSeq2SeqModelOutput,
39
+ )
40
+ from ...modeling_flax_utils import (
41
+ ACT2FN,
42
+ FlaxPreTrainedModel,
43
+ append_call_sample_docstring,
44
+ append_replace_return_docstrings,
45
+ overwrite_call_docstring,
46
+ )
47
+ from ...utils import add_start_docstrings, logging, replace_return_docstrings
48
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
54
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
55
+
56
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
57
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
58
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
59
+ etc.)
60
+
61
+ This model is also a Flax Linen
62
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
63
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
64
+
65
+ Finally, this model supports inherent JAX features such as:
66
+
67
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
68
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
69
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
70
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
71
+
72
+ Parameters:
73
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
74
+ Initializing with a config file does not load the weights associated with the model, only the
75
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
76
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
77
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
78
+ `jax.numpy.bfloat16` (on TPUs).
79
+
80
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
81
+ specified all the computation will be performed with the given `dtype`.
82
+
83
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
84
+ parameters.**
85
+
86
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
87
+ [`~FlaxPreTrainedModel.to_bf16`].
88
+ """
89
+
90
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
91
+ Args:
92
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
93
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
94
+ it.
95
+
96
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
97
+ [`PreTrainedTokenizer.__call__`] for details.
98
+
99
+ [What are input IDs?](../glossary#input-ids)
100
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
101
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
102
+
103
+ - 1 for tokens that are **not masked**,
104
+ - 0 for tokens that are **masked**.
105
+
106
+ [What are attention masks?](../glossary#attention-mask)
107
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Indices of decoder input sequence tokens in the vocabulary.
109
+
110
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
111
+ [`PreTrainedTokenizer.__call__`] for details.
112
+
113
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
114
+
115
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
116
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
117
+ for denoising pre-training following the paper.
118
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
119
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
120
+ be used by default.
121
+
122
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
123
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
124
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
125
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
126
+ config.max_position_embeddings - 1]`.
127
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
128
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
129
+ range `[0, config.max_position_embeddings - 1]`.
130
+ output_attentions (`bool`, *optional*):
131
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
132
+ tensors for more detail.
133
+ output_hidden_states (`bool`, *optional*):
134
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
135
+ more detail.
136
+ return_dict (`bool`, *optional*):
137
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
138
+ """
139
+
140
+
141
+ BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING = r"""
142
+ Args:
143
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
144
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
145
+ it.
146
+
147
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
148
+ [`PreTrainedTokenizer.__call__`] for details.
149
+
150
+ [What are input IDs?](../glossary#input-ids)
151
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
152
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
153
+
154
+ - 1 for tokens that are **not masked**,
155
+ - 0 for tokens that are **masked**.
156
+
157
+ [What are attention masks?](../glossary#attention-mask)
158
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
159
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
160
+ config.max_position_embeddings - 1]`.
161
+ output_attentions (`bool`, *optional*):
162
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
163
+ tensors for more detail.
164
+ output_hidden_states (`bool`, *optional*):
165
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
166
+ more detail.
167
+ return_dict (`bool`, *optional*):
168
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
169
+ """
170
+
171
+ BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING = r"""
172
+ Args:
173
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
174
+ Indices of decoder input sequence tokens in the vocabulary.
175
+
176
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
177
+ [`PreTrainedTokenizer.__call__`] for details.
178
+
179
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
180
+
181
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
182
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
183
+ for denoising pre-training following the paper.
184
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
185
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
186
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
187
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
188
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
189
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
190
+
191
+ - 1 for tokens that are **not masked**,
192
+ - 0 for tokens that are **masked**.
193
+
194
+ [What are attention masks?](../glossary#attention-mask)
195
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
196
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
197
+ be used by default.
198
+
199
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
200
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
201
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
202
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
203
+ range `[0, config.max_position_embeddings - 1]`.
204
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
205
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
206
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
207
+ output_attentions (`bool`, *optional*):
208
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
209
+ tensors for more detail.
210
+ output_hidden_states (`bool`, *optional*):
211
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
212
+ more detail.
213
+ return_dict (`bool`, *optional*):
214
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
215
+ """
216
+
217
+
218
+ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
219
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
220
+ """
221
+ Shift input ids one token to the right.
222
+ """
223
+ shifted_input_ids = jnp.zeros_like(input_ids)
224
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
225
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
226
+
227
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
228
+ return shifted_input_ids
229
+
230
+
231
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->BlenderbotSmall
232
+ class FlaxBlenderbotSmallAttention(nn.Module):
233
+ config: BlenderbotSmallConfig
234
+ embed_dim: int
235
+ num_heads: int
236
+ dropout: float = 0.0
237
+ causal: bool = False
238
+ bias: bool = True
239
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
240
+
241
+ def setup(self) -> None:
242
+ self.head_dim = self.embed_dim // self.num_heads
243
+ if self.head_dim * self.num_heads != self.embed_dim:
244
+ raise ValueError(
245
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
246
+ f" and `num_heads`: {self.num_heads})."
247
+ )
248
+
249
+ dense = partial(
250
+ nn.Dense,
251
+ self.embed_dim,
252
+ use_bias=self.bias,
253
+ dtype=self.dtype,
254
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
255
+ )
256
+
257
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
258
+ self.out_proj = dense()
259
+
260
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
261
+
262
+ if self.causal:
263
+ self.causal_mask = make_causal_mask(
264
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
265
+ )
266
+
267
+ def _split_heads(self, hidden_states):
268
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
269
+
270
+ def _merge_heads(self, hidden_states):
271
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
272
+
273
+ @nn.compact
274
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
275
+ """
276
+ This function takes projected key, value states from a single input token and concatenates the states to cached
277
+ states from previous steps. This function is slighly adapted from the official Flax repository:
278
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
279
+ """
280
+ # detect if we're initializing by absence of existing cache data.
281
+ is_initialized = self.has_variable("cache", "cached_key")
282
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
283
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
284
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
285
+
286
+ if is_initialized:
287
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
288
+ # update key, value caches with our new 1d spatial slices
289
+ cur_index = cache_index.value
290
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
291
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
292
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
293
+ cached_key.value = key
294
+ cached_value.value = value
295
+ num_updated_cache_vectors = query.shape[1]
296
+ cache_index.value = cache_index.value + num_updated_cache_vectors
297
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
298
+ pad_mask = jnp.broadcast_to(
299
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
300
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
301
+ )
302
+ attention_mask = combine_masks(pad_mask, attention_mask)
303
+ return key, value, attention_mask
304
+
305
+ def __call__(
306
+ self,
307
+ hidden_states: jnp.ndarray,
308
+ key_value_states: Optional[jnp.ndarray] = None,
309
+ attention_mask: Optional[jnp.ndarray] = None,
310
+ init_cache: bool = False,
311
+ deterministic: bool = True,
312
+ ) -> Tuple[jnp.ndarray]:
313
+ """Input shape: Batch x Time x Channel"""
314
+
315
+ # if key_value_states are provided this layer is used as a cross-attention layer
316
+ # for the decoder
317
+ is_cross_attention = key_value_states is not None
318
+ batch_size = hidden_states.shape[0]
319
+
320
+ # get query proj
321
+ query_states = self.q_proj(hidden_states)
322
+ # get key, value proj
323
+ if is_cross_attention:
324
+ # cross_attentions
325
+ key_states = self.k_proj(key_value_states)
326
+ value_states = self.v_proj(key_value_states)
327
+ else:
328
+ # self_attention
329
+ key_states = self.k_proj(hidden_states)
330
+ value_states = self.v_proj(hidden_states)
331
+
332
+ query_states = self._split_heads(query_states)
333
+ key_states = self._split_heads(key_states)
334
+ value_states = self._split_heads(value_states)
335
+
336
+ # handle cache / prepare causal attention mask
337
+ if self.causal:
338
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
339
+ if self.has_variable("cache", "cached_key"):
340
+ mask_shift = self.variables["cache"]["cache_index"]
341
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
342
+ causal_mask = lax.dynamic_slice(
343
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
344
+ )
345
+ else:
346
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
347
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
348
+
349
+ # combine masks if needed
350
+ if attention_mask is not None and self.causal:
351
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
352
+ attention_mask = combine_masks(attention_mask, causal_mask)
353
+ elif self.causal:
354
+ attention_mask = causal_mask
355
+ elif attention_mask is not None:
356
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
357
+
358
+ # During fast autoregressive decoding, we feed one position at a time,
359
+ # and cache the keys and values step by step.
360
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
361
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
362
+ key_states, value_states, query_states, attention_mask
363
+ )
364
+
365
+ # Convert the boolean attention mask to an attention bias.
366
+ if attention_mask is not None:
367
+ # attention mask in the form of attention bias
368
+ attention_bias = lax.select(
369
+ attention_mask > 0,
370
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
371
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
372
+ )
373
+ else:
374
+ attention_bias = None
375
+
376
+ dropout_rng = None
377
+ if not deterministic and self.dropout > 0.0:
378
+ dropout_rng = self.make_rng("dropout")
379
+
380
+ attn_weights = dot_product_attention_weights(
381
+ query_states,
382
+ key_states,
383
+ bias=attention_bias,
384
+ dropout_rng=dropout_rng,
385
+ dropout_rate=self.dropout,
386
+ broadcast_dropout=True,
387
+ deterministic=deterministic,
388
+ dtype=self.dtype,
389
+ precision=None,
390
+ )
391
+
392
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
393
+ attn_output = self._merge_heads(attn_output)
394
+ attn_output = self.out_proj(attn_output)
395
+
396
+ return attn_output, attn_weights
397
+
398
+
399
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->BlenderbotSmall
400
+ class FlaxBlenderbotSmallEncoderLayer(nn.Module):
401
+ config: BlenderbotSmallConfig
402
+ dtype: jnp.dtype = jnp.float32
403
+
404
+ def setup(self) -> None:
405
+ self.embed_dim = self.config.d_model
406
+ self.self_attn = FlaxBlenderbotSmallAttention(
407
+ config=self.config,
408
+ embed_dim=self.embed_dim,
409
+ num_heads=self.config.encoder_attention_heads,
410
+ dropout=self.config.attention_dropout,
411
+ dtype=self.dtype,
412
+ )
413
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
414
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
415
+ self.activation_fn = ACT2FN[self.config.activation_function]
416
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
417
+ self.fc1 = nn.Dense(
418
+ self.config.encoder_ffn_dim,
419
+ dtype=self.dtype,
420
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
421
+ )
422
+ self.fc2 = nn.Dense(
423
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
424
+ )
425
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
426
+
427
+ def __call__(
428
+ self,
429
+ hidden_states: jnp.ndarray,
430
+ attention_mask: jnp.ndarray,
431
+ output_attentions: bool = True,
432
+ deterministic: bool = True,
433
+ ) -> Tuple[jnp.ndarray]:
434
+ residual = hidden_states
435
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
436
+
437
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
438
+ hidden_states = residual + hidden_states
439
+ hidden_states = self.self_attn_layer_norm(hidden_states)
440
+
441
+ residual = hidden_states
442
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
443
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
444
+ hidden_states = self.fc2(hidden_states)
445
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
446
+ hidden_states = residual + hidden_states
447
+ hidden_states = self.final_layer_norm(hidden_states)
448
+
449
+ outputs = (hidden_states,)
450
+
451
+ if output_attentions:
452
+ outputs += (attn_weights,)
453
+
454
+ return outputs
455
+
456
+
457
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->BlenderbotSmall
458
+ class FlaxBlenderbotSmallEncoderLayerCollection(nn.Module):
459
+ config: BlenderbotSmallConfig
460
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
461
+
462
+ def setup(self):
463
+ self.layers = [
464
+ FlaxBlenderbotSmallEncoderLayer(self.config, name=str(i), dtype=self.dtype)
465
+ for i in range(self.config.encoder_layers)
466
+ ]
467
+ self.layerdrop = self.config.encoder_layerdrop
468
+
469
+ def __call__(
470
+ self,
471
+ hidden_states,
472
+ attention_mask,
473
+ deterministic: bool = True,
474
+ output_attentions: bool = False,
475
+ output_hidden_states: bool = False,
476
+ return_dict: bool = True,
477
+ ):
478
+ all_attentions = () if output_attentions else None
479
+ all_hidden_states = () if output_hidden_states else None
480
+
481
+ for encoder_layer in self.layers:
482
+ if output_hidden_states:
483
+ all_hidden_states = all_hidden_states + (hidden_states,)
484
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
485
+ dropout_probability = random.uniform(0, 1)
486
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
487
+ layer_outputs = (None, None)
488
+ else:
489
+ layer_outputs = encoder_layer(
490
+ hidden_states,
491
+ attention_mask,
492
+ output_attentions,
493
+ deterministic,
494
+ )
495
+ hidden_states = layer_outputs[0]
496
+ if output_attentions:
497
+ all_attentions = all_attentions + (layer_outputs[1],)
498
+
499
+ if output_hidden_states:
500
+ all_hidden_states += (hidden_states,)
501
+
502
+ outputs = (hidden_states, all_hidden_states, all_attentions)
503
+
504
+ if not return_dict:
505
+ return tuple(v for v in outputs if v is not None)
506
+
507
+ return FlaxBaseModelOutput(
508
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
509
+ )
510
+
511
+
512
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->BlenderbotSmall
513
+ class FlaxBlenderbotSmallDecoderLayer(nn.Module):
514
+ config: BlenderbotSmallConfig
515
+ dtype: jnp.dtype = jnp.float32
516
+
517
+ def setup(self) -> None:
518
+ self.embed_dim = self.config.d_model
519
+ self.self_attn = FlaxBlenderbotSmallAttention(
520
+ config=self.config,
521
+ embed_dim=self.embed_dim,
522
+ num_heads=self.config.decoder_attention_heads,
523
+ dropout=self.config.attention_dropout,
524
+ causal=True,
525
+ dtype=self.dtype,
526
+ )
527
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
528
+ self.activation_fn = ACT2FN[self.config.activation_function]
529
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
530
+
531
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
532
+ self.encoder_attn = FlaxBlenderbotSmallAttention(
533
+ config=self.config,
534
+ embed_dim=self.embed_dim,
535
+ num_heads=self.config.decoder_attention_heads,
536
+ dropout=self.config.attention_dropout,
537
+ dtype=self.dtype,
538
+ )
539
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
540
+ self.fc1 = nn.Dense(
541
+ self.config.decoder_ffn_dim,
542
+ dtype=self.dtype,
543
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
544
+ )
545
+ self.fc2 = nn.Dense(
546
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
547
+ )
548
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
549
+
550
+ def __call__(
551
+ self,
552
+ hidden_states: jnp.ndarray,
553
+ attention_mask: jnp.ndarray,
554
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
555
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
556
+ init_cache: bool = False,
557
+ output_attentions: bool = True,
558
+ deterministic: bool = True,
559
+ ) -> Tuple[jnp.ndarray]:
560
+ residual = hidden_states
561
+
562
+ # Self Attention
563
+ hidden_states, self_attn_weights = self.self_attn(
564
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
565
+ )
566
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
567
+ hidden_states = residual + hidden_states
568
+ hidden_states = self.self_attn_layer_norm(hidden_states)
569
+
570
+ # Cross-Attention Block
571
+ cross_attn_weights = None
572
+ if encoder_hidden_states is not None:
573
+ residual = hidden_states
574
+
575
+ hidden_states, cross_attn_weights = self.encoder_attn(
576
+ hidden_states=hidden_states,
577
+ key_value_states=encoder_hidden_states,
578
+ attention_mask=encoder_attention_mask,
579
+ )
580
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
581
+ hidden_states = residual + hidden_states
582
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
583
+
584
+ # Fully Connected
585
+ residual = hidden_states
586
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
587
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
588
+ hidden_states = self.fc2(hidden_states)
589
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
590
+ hidden_states = residual + hidden_states
591
+ hidden_states = self.final_layer_norm(hidden_states)
592
+
593
+ outputs = (hidden_states,)
594
+
595
+ if output_attentions:
596
+ outputs += (self_attn_weights, cross_attn_weights)
597
+
598
+ return outputs
599
+
600
+
601
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->BlenderbotSmall
602
+ class FlaxBlenderbotSmallDecoderLayerCollection(nn.Module):
603
+ config: BlenderbotSmallConfig
604
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
605
+
606
+ def setup(self):
607
+ self.layers = [
608
+ FlaxBlenderbotSmallDecoderLayer(self.config, name=str(i), dtype=self.dtype)
609
+ for i in range(self.config.decoder_layers)
610
+ ]
611
+ self.layerdrop = self.config.decoder_layerdrop
612
+
613
+ def __call__(
614
+ self,
615
+ hidden_states,
616
+ attention_mask,
617
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
618
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
619
+ deterministic: bool = True,
620
+ init_cache: bool = False,
621
+ output_attentions: bool = False,
622
+ output_hidden_states: bool = False,
623
+ return_dict: bool = True,
624
+ ):
625
+ # decoder layers
626
+ all_hidden_states = () if output_hidden_states else None
627
+ all_self_attns = () if output_attentions else None
628
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
629
+
630
+ for decoder_layer in self.layers:
631
+ if output_hidden_states:
632
+ all_hidden_states += (hidden_states,)
633
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
634
+ dropout_probability = random.uniform(0, 1)
635
+ if not deterministic and (dropout_probability < self.layerdrop):
636
+ layer_outputs = (None, None, None)
637
+ else:
638
+ layer_outputs = decoder_layer(
639
+ hidden_states,
640
+ attention_mask=attention_mask,
641
+ encoder_hidden_states=encoder_hidden_states,
642
+ encoder_attention_mask=encoder_attention_mask,
643
+ init_cache=init_cache,
644
+ output_attentions=output_attentions,
645
+ deterministic=deterministic,
646
+ )
647
+
648
+ hidden_states = layer_outputs[0]
649
+ if output_attentions:
650
+ all_self_attns += (layer_outputs[1],)
651
+
652
+ if encoder_hidden_states is not None:
653
+ all_cross_attentions += (layer_outputs[2],)
654
+
655
+ # add hidden states from the last decoder layer
656
+ if output_hidden_states:
657
+ all_hidden_states += (hidden_states,)
658
+
659
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
660
+
661
+ if not return_dict:
662
+ return tuple(v for v in outputs if v is not None)
663
+
664
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
665
+ last_hidden_state=hidden_states,
666
+ hidden_states=all_hidden_states,
667
+ attentions=all_self_attns,
668
+ cross_attentions=all_cross_attentions,
669
+ )
670
+
671
+
672
+ class FlaxBlenderbotSmallEncoder(nn.Module):
673
+ config: BlenderbotSmallConfig
674
+ embed_tokens: nn.Embed
675
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
676
+
677
+ def setup(self):
678
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
679
+
680
+ embed_dim = self.config.d_model
681
+ self.padding_idx = self.config.pad_token_id
682
+ self.max_source_positions = self.config.max_position_embeddings
683
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
684
+
685
+ self.embed_positions = nn.Embed(
686
+ self.config.max_position_embeddings,
687
+ embed_dim,
688
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
689
+ )
690
+ self.layers = FlaxBlenderbotSmallEncoderLayerCollection(self.config, self.dtype)
691
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
692
+
693
+ def __call__(
694
+ self,
695
+ input_ids,
696
+ attention_mask,
697
+ position_ids,
698
+ output_attentions: bool = False,
699
+ output_hidden_states: bool = False,
700
+ return_dict: bool = True,
701
+ deterministic: bool = True,
702
+ ):
703
+ input_shape = input_ids.shape
704
+ input_ids = input_ids.reshape(-1, input_shape[-1])
705
+
706
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
707
+
708
+ embed_pos = self.embed_positions(position_ids)
709
+
710
+ hidden_states = inputs_embeds + embed_pos
711
+ hidden_states = self.layernorm_embedding(hidden_states)
712
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
713
+
714
+ outputs = self.layers(
715
+ hidden_states,
716
+ attention_mask,
717
+ deterministic=deterministic,
718
+ output_attentions=output_attentions,
719
+ output_hidden_states=output_hidden_states,
720
+ return_dict=return_dict,
721
+ )
722
+
723
+ if not return_dict:
724
+ return outputs
725
+
726
+ return FlaxBaseModelOutput(
727
+ last_hidden_state=outputs.last_hidden_state,
728
+ hidden_states=outputs.hidden_states,
729
+ attentions=outputs.attentions,
730
+ )
731
+
732
+
733
+ class FlaxBlenderbotSmallDecoder(nn.Module):
734
+ config: BlenderbotSmallConfig
735
+ embed_tokens: nn.Embed
736
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
737
+
738
+ def setup(self):
739
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
740
+
741
+ embed_dim = self.config.d_model
742
+ self.padding_idx = self.config.pad_token_id
743
+ self.max_target_positions = self.config.max_position_embeddings
744
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
745
+
746
+ self.embed_positions = nn.Embed(
747
+ self.config.max_position_embeddings,
748
+ embed_dim,
749
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
750
+ )
751
+
752
+ self.layers = FlaxBlenderbotSmallDecoderLayerCollection(self.config, self.dtype)
753
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
754
+
755
+ def __call__(
756
+ self,
757
+ input_ids,
758
+ attention_mask,
759
+ position_ids,
760
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
761
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
762
+ init_cache: bool = False,
763
+ output_attentions: bool = False,
764
+ output_hidden_states: bool = False,
765
+ return_dict: bool = True,
766
+ deterministic: bool = True,
767
+ ):
768
+ input_shape = input_ids.shape
769
+ input_ids = input_ids.reshape(-1, input_shape[-1])
770
+
771
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
772
+
773
+ # embed positions
774
+ positions = self.embed_positions(position_ids)
775
+
776
+ # BlenderbotSmall applies layer norm on inputs_embeds in decoder
777
+ inputs_embeds = self.layernorm_embedding(inputs_embeds)
778
+ hidden_states = inputs_embeds + positions
779
+
780
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
781
+
782
+ outputs = self.layers(
783
+ hidden_states,
784
+ attention_mask,
785
+ encoder_hidden_states,
786
+ encoder_attention_mask,
787
+ deterministic=deterministic,
788
+ init_cache=init_cache,
789
+ output_attentions=output_attentions,
790
+ output_hidden_states=output_hidden_states,
791
+ return_dict=return_dict,
792
+ )
793
+
794
+ if not return_dict:
795
+ return outputs
796
+
797
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
798
+ last_hidden_state=outputs.last_hidden_state,
799
+ hidden_states=outputs.hidden_states,
800
+ attentions=outputs.attentions,
801
+ cross_attentions=outputs.cross_attentions,
802
+ )
803
+
804
+
805
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->BlenderbotSmall
806
+ class FlaxBlenderbotSmallModule(nn.Module):
807
+ config: BlenderbotSmallConfig
808
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
809
+
810
+ def setup(self):
811
+ self.shared = nn.Embed(
812
+ self.config.vocab_size,
813
+ self.config.d_model,
814
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
815
+ dtype=self.dtype,
816
+ )
817
+
818
+ self.encoder = FlaxBlenderbotSmallEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
819
+ self.decoder = FlaxBlenderbotSmallDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
820
+
821
+ def _get_encoder_module(self):
822
+ return self.encoder
823
+
824
+ def _get_decoder_module(self):
825
+ return self.decoder
826
+
827
+ def __call__(
828
+ self,
829
+ input_ids,
830
+ attention_mask,
831
+ decoder_input_ids,
832
+ decoder_attention_mask,
833
+ position_ids,
834
+ decoder_position_ids,
835
+ output_attentions: bool = False,
836
+ output_hidden_states: bool = False,
837
+ return_dict: bool = True,
838
+ deterministic: bool = True,
839
+ ):
840
+ encoder_outputs = self.encoder(
841
+ input_ids=input_ids,
842
+ attention_mask=attention_mask,
843
+ position_ids=position_ids,
844
+ output_attentions=output_attentions,
845
+ output_hidden_states=output_hidden_states,
846
+ return_dict=return_dict,
847
+ deterministic=deterministic,
848
+ )
849
+
850
+ decoder_outputs = self.decoder(
851
+ input_ids=decoder_input_ids,
852
+ attention_mask=decoder_attention_mask,
853
+ position_ids=decoder_position_ids,
854
+ encoder_hidden_states=encoder_outputs[0],
855
+ encoder_attention_mask=attention_mask,
856
+ output_attentions=output_attentions,
857
+ output_hidden_states=output_hidden_states,
858
+ return_dict=return_dict,
859
+ deterministic=deterministic,
860
+ )
861
+
862
+ if not return_dict:
863
+ return decoder_outputs + encoder_outputs
864
+
865
+ return FlaxSeq2SeqModelOutput(
866
+ last_hidden_state=decoder_outputs.last_hidden_state,
867
+ decoder_hidden_states=decoder_outputs.hidden_states,
868
+ decoder_attentions=decoder_outputs.attentions,
869
+ cross_attentions=decoder_outputs.cross_attentions,
870
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
871
+ encoder_hidden_states=encoder_outputs.hidden_states,
872
+ encoder_attentions=encoder_outputs.attentions,
873
+ )
874
+
875
+
876
+ class FlaxBlenderbotSmallPreTrainedModel(FlaxPreTrainedModel):
877
+ config_class = BlenderbotSmallConfig
878
+ base_model_prefix: str = "model"
879
+ module_class: nn.Module = None
880
+
881
+ def __init__(
882
+ self,
883
+ config: BlenderbotSmallConfig,
884
+ input_shape: Tuple[int] = (1, 1),
885
+ seed: int = 0,
886
+ dtype: jnp.dtype = jnp.float32,
887
+ _do_init: bool = True,
888
+ **kwargs,
889
+ ):
890
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
891
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
892
+
893
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
894
+ # init input tensors
895
+ input_ids = jnp.zeros(input_shape, dtype="i4")
896
+ # make sure initialization pass will work for FlaxBlenderbotSmallForSequenceClassificationModule
897
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
898
+ attention_mask = jnp.ones_like(input_ids)
899
+ decoder_input_ids = input_ids
900
+ decoder_attention_mask = jnp.ones_like(input_ids)
901
+
902
+ batch_size, sequence_length = input_ids.shape
903
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
904
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
905
+
906
+ params_rng, dropout_rng = jax.random.split(rng)
907
+ rngs = {"params": params_rng, "dropout": dropout_rng}
908
+
909
+ random_params = self.module.init(
910
+ rngs,
911
+ input_ids,
912
+ attention_mask,
913
+ decoder_input_ids,
914
+ decoder_attention_mask,
915
+ position_ids,
916
+ decoder_position_ids,
917
+ )["params"]
918
+
919
+ if params is not None:
920
+ random_params = flatten_dict(unfreeze(random_params))
921
+ params = flatten_dict(unfreeze(params))
922
+ for missing_key in self._missing_keys:
923
+ params[missing_key] = random_params[missing_key]
924
+ self._missing_keys = set()
925
+ return freeze(unflatten_dict(params))
926
+ else:
927
+ return random_params
928
+
929
+ def init_cache(self, batch_size, max_length, encoder_outputs):
930
+ r"""
931
+ Args:
932
+ batch_size (`int`):
933
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
934
+ max_length (`int`):
935
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
936
+ cache.
937
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
938
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
939
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
940
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
941
+ cross-attention of the decoder.
942
+ """
943
+ # init input variables to retrieve cache
944
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
945
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
946
+ decoder_position_ids = jnp.broadcast_to(
947
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
948
+ )
949
+
950
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
951
+ decoder_module = module._get_decoder_module()
952
+ return decoder_module(
953
+ decoder_input_ids,
954
+ decoder_attention_mask,
955
+ decoder_position_ids,
956
+ **kwargs,
957
+ )
958
+
959
+ init_variables = self.module.init(
960
+ jax.random.PRNGKey(0),
961
+ decoder_input_ids=decoder_input_ids,
962
+ decoder_attention_mask=decoder_attention_mask,
963
+ decoder_position_ids=decoder_position_ids,
964
+ encoder_hidden_states=encoder_outputs[0],
965
+ init_cache=True,
966
+ method=_decoder_forward, # we only need to call the decoder to init the cache
967
+ )
968
+ return unfreeze(init_variables["cache"])
969
+
970
+ @add_start_docstrings(BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING)
971
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotSmallConfig)
972
+ def encode(
973
+ self,
974
+ input_ids: jnp.ndarray,
975
+ attention_mask: Optional[jnp.ndarray] = None,
976
+ position_ids: Optional[jnp.ndarray] = None,
977
+ output_attentions: Optional[bool] = None,
978
+ output_hidden_states: Optional[bool] = None,
979
+ return_dict: Optional[bool] = None,
980
+ train: bool = False,
981
+ params: dict = None,
982
+ dropout_rng: PRNGKey = None,
983
+ ):
984
+ r"""
985
+ Returns:
986
+
987
+ Example:
988
+
989
+ ```python
990
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
991
+
992
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
993
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
994
+
995
+ >>> text = "My friends are cool but they eat too many carbs."
996
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
997
+ >>> encoder_outputs = model.encode(**inputs)
998
+ ```"""
999
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1000
+ output_hidden_states = (
1001
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1002
+ )
1003
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1004
+
1005
+ if attention_mask is None:
1006
+ attention_mask = jnp.ones_like(input_ids)
1007
+ if position_ids is None:
1008
+ batch_size, sequence_length = input_ids.shape
1009
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1010
+
1011
+ # Handle any PRNG if needed
1012
+ rngs = {}
1013
+ if dropout_rng is not None:
1014
+ rngs["dropout"] = dropout_rng
1015
+
1016
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
1017
+ encode_module = module._get_encoder_module()
1018
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
1019
+
1020
+ return self.module.apply(
1021
+ {"params": params or self.params},
1022
+ input_ids=jnp.array(input_ids, dtype="i4"),
1023
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1024
+ position_ids=jnp.array(position_ids, dtype="i4"),
1025
+ output_attentions=output_attentions,
1026
+ output_hidden_states=output_hidden_states,
1027
+ return_dict=return_dict,
1028
+ deterministic=not train,
1029
+ rngs=rngs,
1030
+ method=_encoder_forward,
1031
+ )
1032
+
1033
+ @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING)
1034
+ @replace_return_docstrings(
1035
+ output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotSmallConfig
1036
+ )
1037
+ def decode(
1038
+ self,
1039
+ decoder_input_ids,
1040
+ encoder_outputs,
1041
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1042
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1043
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1044
+ past_key_values: dict = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ train: bool = False,
1049
+ params: dict = None,
1050
+ dropout_rng: PRNGKey = None,
1051
+ ):
1052
+ r"""
1053
+ Returns:
1054
+
1055
+ Example:
1056
+
1057
+ ```python
1058
+ >>> import jax.numpy as jnp
1059
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1060
+
1061
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1062
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1063
+
1064
+ >>> text = "My friends are cool but they eat too many carbs."
1065
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1066
+ >>> encoder_outputs = model.encode(**inputs)
1067
+
1068
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1069
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1070
+
1071
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1072
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
1073
+ ```"""
1074
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1075
+ output_hidden_states = (
1076
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1077
+ )
1078
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1079
+
1080
+ encoder_hidden_states = encoder_outputs[0]
1081
+ if encoder_attention_mask is None:
1082
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1083
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1084
+
1085
+ batch_size, sequence_length = decoder_input_ids.shape
1086
+ if decoder_attention_mask is None:
1087
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1088
+
1089
+ if decoder_position_ids is None:
1090
+ if past_key_values is not None:
1091
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1092
+
1093
+ decoder_position_ids = jnp.broadcast_to(
1094
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1095
+ )
1096
+
1097
+ # Handle any PRNG if needed
1098
+ rngs = {}
1099
+ if dropout_rng is not None:
1100
+ rngs["dropout"] = dropout_rng
1101
+
1102
+ inputs = {"params": params or self.params}
1103
+
1104
+ # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
1105
+ # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
1106
+ # it can be changed by FlaxBlenderbotSmallAttention module
1107
+ if past_key_values:
1108
+ inputs["cache"] = past_key_values
1109
+ mutable = ["cache"]
1110
+ else:
1111
+ mutable = False
1112
+
1113
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1114
+ decoder_module = module._get_decoder_module()
1115
+ return decoder_module(
1116
+ decoder_input_ids,
1117
+ decoder_attention_mask,
1118
+ decoder_position_ids,
1119
+ **kwargs,
1120
+ )
1121
+
1122
+ outputs = self.module.apply(
1123
+ inputs,
1124
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1125
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1126
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1127
+ encoder_hidden_states=encoder_hidden_states,
1128
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1129
+ output_attentions=output_attentions,
1130
+ output_hidden_states=output_hidden_states,
1131
+ return_dict=return_dict,
1132
+ deterministic=not train,
1133
+ rngs=rngs,
1134
+ mutable=mutable,
1135
+ method=_decoder_forward,
1136
+ )
1137
+
1138
+ # add updated cache to model output
1139
+ if past_key_values is not None and return_dict:
1140
+ outputs, past = outputs
1141
+ outputs["past_key_values"] = unfreeze(past["cache"])
1142
+ return outputs
1143
+ elif past_key_values is not None and not return_dict:
1144
+ outputs, past = outputs
1145
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1146
+
1147
+ return outputs
1148
+
1149
+ def __call__(
1150
+ self,
1151
+ input_ids: jnp.ndarray,
1152
+ attention_mask: Optional[jnp.ndarray] = None,
1153
+ decoder_input_ids: Optional[jnp.ndarray] = None,
1154
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1155
+ position_ids: Optional[jnp.ndarray] = None,
1156
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1157
+ output_attentions: Optional[bool] = None,
1158
+ output_hidden_states: Optional[bool] = None,
1159
+ return_dict: Optional[bool] = None,
1160
+ train: bool = False,
1161
+ params: dict = None,
1162
+ dropout_rng: PRNGKey = None,
1163
+ ):
1164
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1165
+ output_hidden_states = (
1166
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1167
+ )
1168
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1169
+
1170
+ # prepare encoder inputs
1171
+ if attention_mask is None:
1172
+ attention_mask = jnp.ones_like(input_ids)
1173
+ if position_ids is None:
1174
+ batch_size, sequence_length = input_ids.shape
1175
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1176
+
1177
+ # prepare decoder inputs
1178
+ if decoder_input_ids is None:
1179
+ decoder_input_ids = shift_tokens_right(
1180
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
1181
+ )
1182
+ if decoder_attention_mask is None:
1183
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1184
+ if decoder_position_ids is None:
1185
+ batch_size, sequence_length = decoder_input_ids.shape
1186
+ decoder_position_ids = jnp.broadcast_to(
1187
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1188
+ )
1189
+
1190
+ # Handle any PRNG if needed
1191
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1192
+
1193
+ return self.module.apply(
1194
+ {"params": params or self.params},
1195
+ input_ids=jnp.array(input_ids, dtype="i4"),
1196
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1197
+ position_ids=jnp.array(position_ids, dtype="i4"),
1198
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1199
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1200
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ deterministic=not train,
1205
+ rngs=rngs,
1206
+ )
1207
+
1208
+
1209
+ @add_start_docstrings(
1210
+ "The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top.",
1211
+ BLENDERBOT_SMALL_START_DOCSTRING,
1212
+ )
1213
+ class FlaxBlenderbotSmallModel(FlaxBlenderbotSmallPreTrainedModel):
1214
+ config: BlenderbotSmallConfig
1215
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
1216
+ module_class = FlaxBlenderbotSmallModule
1217
+
1218
+
1219
+ append_call_sample_docstring(FlaxBlenderbotSmallModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
1220
+
1221
+
1222
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->BlenderbotSmall
1223
+ class FlaxBlenderbotSmallForConditionalGenerationModule(nn.Module):
1224
+ config: BlenderbotSmallConfig
1225
+ dtype: jnp.dtype = jnp.float32
1226
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
1227
+
1228
+ def setup(self):
1229
+ self.model = FlaxBlenderbotSmallModule(config=self.config, dtype=self.dtype)
1230
+ self.lm_head = nn.Dense(
1231
+ self.model.shared.num_embeddings,
1232
+ use_bias=False,
1233
+ dtype=self.dtype,
1234
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1235
+ )
1236
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
1237
+
1238
+ def _get_encoder_module(self):
1239
+ return self.model.encoder
1240
+
1241
+ def _get_decoder_module(self):
1242
+ return self.model.decoder
1243
+
1244
+ def __call__(
1245
+ self,
1246
+ input_ids,
1247
+ attention_mask,
1248
+ decoder_input_ids,
1249
+ decoder_attention_mask,
1250
+ position_ids,
1251
+ decoder_position_ids,
1252
+ output_attentions: bool = False,
1253
+ output_hidden_states: bool = False,
1254
+ return_dict: bool = True,
1255
+ deterministic: bool = True,
1256
+ ):
1257
+ outputs = self.model(
1258
+ input_ids=input_ids,
1259
+ attention_mask=attention_mask,
1260
+ decoder_input_ids=decoder_input_ids,
1261
+ decoder_attention_mask=decoder_attention_mask,
1262
+ position_ids=position_ids,
1263
+ decoder_position_ids=decoder_position_ids,
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ deterministic=deterministic,
1268
+ )
1269
+
1270
+ hidden_states = outputs[0]
1271
+
1272
+ if self.config.tie_word_embeddings:
1273
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
1274
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1275
+ else:
1276
+ lm_logits = self.lm_head(hidden_states)
1277
+
1278
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
1279
+
1280
+ if not return_dict:
1281
+ output = (lm_logits,) + outputs[1:]
1282
+ return output
1283
+
1284
+ return FlaxSeq2SeqLMOutput(
1285
+ logits=lm_logits,
1286
+ decoder_hidden_states=outputs.decoder_hidden_states,
1287
+ decoder_attentions=outputs.decoder_attentions,
1288
+ cross_attentions=outputs.cross_attentions,
1289
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1290
+ encoder_hidden_states=outputs.encoder_hidden_states,
1291
+ encoder_attentions=outputs.encoder_attentions,
1292
+ )
1293
+
1294
+
1295
+ @add_start_docstrings(
1296
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
1297
+ BLENDERBOT_SMALL_START_DOCSTRING,
1298
+ )
1299
+ class FlaxBlenderbotSmallForConditionalGeneration(FlaxBlenderbotSmallPreTrainedModel):
1300
+ module_class = FlaxBlenderbotSmallForConditionalGenerationModule
1301
+ dtype: jnp.dtype = jnp.float32
1302
+
1303
+ @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING)
1304
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotSmallConfig)
1305
+ def decode(
1306
+ self,
1307
+ decoder_input_ids,
1308
+ encoder_outputs,
1309
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1310
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1311
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1312
+ past_key_values: dict = None,
1313
+ output_attentions: Optional[bool] = None,
1314
+ output_hidden_states: Optional[bool] = None,
1315
+ return_dict: Optional[bool] = None,
1316
+ deterministic: bool = True,
1317
+ params: dict = None,
1318
+ dropout_rng: PRNGKey = None,
1319
+ ):
1320
+ r"""
1321
+ Returns:
1322
+
1323
+ Example:
1324
+
1325
+ ```python
1326
+ >>> import jax.numpy as jnp
1327
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1328
+
1329
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1330
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1331
+
1332
+ >>> text = "My friends are cool but they eat too many carbs."
1333
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1334
+ >>> encoder_outputs = model.encode(**inputs)
1335
+
1336
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1337
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1338
+
1339
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1340
+ >>> logits = outputs.logits
1341
+ ```"""
1342
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1343
+ output_hidden_states = (
1344
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1345
+ )
1346
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1347
+
1348
+ encoder_hidden_states = encoder_outputs[0]
1349
+ if encoder_attention_mask is None:
1350
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1351
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1352
+
1353
+ batch_size, sequence_length = decoder_input_ids.shape
1354
+ if decoder_attention_mask is None:
1355
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1356
+
1357
+ if decoder_position_ids is None:
1358
+ if past_key_values is not None:
1359
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1360
+
1361
+ decoder_position_ids = jnp.broadcast_to(
1362
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1363
+ )
1364
+
1365
+ # Handle any PRNG if needed
1366
+ rngs = {}
1367
+ if dropout_rng is not None:
1368
+ rngs["dropout"] = dropout_rng
1369
+
1370
+ inputs = {"params": params or self.params}
1371
+
1372
+ # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
1373
+ # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
1374
+ # it can be changed by FlaxBlenderbotSmallAttention module
1375
+ if past_key_values:
1376
+ inputs["cache"] = past_key_values
1377
+ mutable = ["cache"]
1378
+ else:
1379
+ mutable = False
1380
+
1381
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1382
+ decoder_module = module._get_decoder_module()
1383
+ outputs = decoder_module(
1384
+ decoder_input_ids,
1385
+ decoder_attention_mask,
1386
+ decoder_position_ids,
1387
+ **kwargs,
1388
+ )
1389
+ hidden_states = outputs[0]
1390
+
1391
+ if self.config.tie_word_embeddings:
1392
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
1393
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1394
+ else:
1395
+ lm_logits = module.lm_head(hidden_states)
1396
+
1397
+ lm_logits += module.final_logits_bias.astype(self.dtype)
1398
+ return lm_logits, outputs
1399
+
1400
+ outputs = self.module.apply(
1401
+ inputs,
1402
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1403
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1404
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1405
+ encoder_hidden_states=encoder_hidden_states,
1406
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ deterministic=deterministic,
1411
+ rngs=rngs,
1412
+ mutable=mutable,
1413
+ method=_decoder_forward,
1414
+ )
1415
+
1416
+ if past_key_values is None:
1417
+ lm_logits, decoder_outputs = outputs
1418
+ else:
1419
+ (lm_logits, decoder_outputs), past = outputs
1420
+
1421
+ if return_dict:
1422
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
1423
+ logits=lm_logits,
1424
+ hidden_states=decoder_outputs.hidden_states,
1425
+ attentions=decoder_outputs.attentions,
1426
+ cross_attentions=decoder_outputs.cross_attentions,
1427
+ )
1428
+ else:
1429
+ outputs = (lm_logits,) + decoder_outputs[1:]
1430
+
1431
+ # add updated cache to model output
1432
+ if past_key_values is not None and return_dict:
1433
+ outputs["past_key_values"] = unfreeze(past["cache"])
1434
+ return outputs
1435
+ elif past_key_values is not None and not return_dict:
1436
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1437
+
1438
+ return outputs
1439
+
1440
+ def prepare_inputs_for_generation(
1441
+ self,
1442
+ decoder_input_ids,
1443
+ max_length,
1444
+ attention_mask: Optional[jax.Array] = None,
1445
+ decoder_attention_mask: Optional[jax.Array] = None,
1446
+ encoder_outputs=None,
1447
+ **kwargs,
1448
+ ):
1449
+ # initializing the cache
1450
+ batch_size, seq_length = decoder_input_ids.shape
1451
+
1452
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1453
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1454
+ # But since the decoder uses a causal mask, those positions are masked anyways.
1455
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
1456
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1457
+ if decoder_attention_mask is not None:
1458
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
1459
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
1460
+ else:
1461
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1462
+
1463
+ return {
1464
+ "past_key_values": past_key_values,
1465
+ "encoder_outputs": encoder_outputs,
1466
+ "encoder_attention_mask": attention_mask,
1467
+ "decoder_attention_mask": extended_attention_mask,
1468
+ "decoder_position_ids": position_ids,
1469
+ }
1470
+
1471
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1472
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1473
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
1474
+ return model_kwargs
1475
+
1476
+
1477
+ FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING = """
1478
+ Returns:
1479
+
1480
+ Summarization example:
1481
+
1482
+ ```py
1483
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1484
+
1485
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1486
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1487
+
1488
+ >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
1489
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
1490
+
1491
+ >>> # Generate Summary
1492
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
1493
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
1494
+ ```
1495
+
1496
+ Mask filling example:
1497
+
1498
+ ```py
1499
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1500
+
1501
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1502
+ >>> TXT = "My friends are <mask> but they eat too many carbs."
1503
+
1504
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1505
+ >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"]
1506
+ >>> logits = model(input_ids).logits
1507
+
1508
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
1509
+ >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
1510
+ >>> values, predictions = jax.lax.top_k(probs)
1511
+
1512
+ >>> tokenizer.decode(predictions).split()
1513
+ ```
1514
+ """
1515
+
1516
+ overwrite_call_docstring(
1517
+ FlaxBlenderbotSmallForConditionalGeneration,
1518
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING + FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING,
1519
+ )
1520
+ append_replace_return_docstrings(
1521
+ FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
1522
+ )
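
A minimal usage sketch for the Flax model added in the file above (illustration only; it assumes JAX/Flax are installed and that the `facebook/blenderbot_small-90M` checkpoint referenced in the docstrings is loadable, and it simply drives the `encode`/`decode` machinery through the higher-level `generate` API):

```python
# Hedged sketch: end-to-end generation with the Flax BlenderbotSmall model above.
# Assumes jax, flax and the facebook/blenderbot_small-90M checkpoint are available.
from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="np")

# generate() runs encode() once, then decode() auto-regressively using the cache
# set up in init_cache() / prepare_inputs_for_generation() above.
output_ids = model.generate(inputs["input_ids"], max_length=40).sequences
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))
```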
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py ADDED
@@ -0,0 +1,240 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for BlenderbotSmall."""
16
+
17
+ import json
18
+ import os
19
+ from typing import Dict, List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ "tokenizer_config_file": "tokenizer_config.json",
34
+ }
35
+
36
+
37
+ def get_pairs(word):
38
+ """
39
+ Return set of symbol pairs in a word.
40
+
41
+ Word is represented as tuple of symbols (symbols being variable-length strings).
42
+ """
43
+ pairs = set()
44
+ prev_char = word[0]
45
+ for char in word[1:]:
46
+ pairs.add((prev_char, char))
47
+ prev_char = char
48
+
49
+ pairs = set(pairs)
50
+ return pairs
51
+
52
+
53
+ class BlenderbotSmallTokenizer(PreTrainedTokenizer):
54
+ """
55
+ Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ the superclass for more information regarding methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ merges_file (`str`):
64
+ Path to the merges file.
65
+ bos_token (`str`, *optional*, defaults to `"__start__"`):
66
+ The beginning of sentence token.
67
+ eos_token (`str`, *optional*, defaults to `"__end__"`):
68
+ The end of sentence token.
69
+ unk_token (`str`, *optional*, defaults to `"__unk__"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ pad_token (`str`, *optional*, defaults to `"__null__"`):
73
+ The token used for padding, for example when batching sequences of different lengths.
74
+ kwargs (*optional*):
75
+ Additional keyword arguments passed along to [`PreTrainedTokenizer`]
76
+ """
77
+
78
+ vocab_files_names = VOCAB_FILES_NAMES
79
+ model_input_names = ["input_ids", "attention_mask"]
80
+
81
+ def __init__(
82
+ self,
83
+ vocab_file,
84
+ merges_file,
85
+ bos_token="__start__",
86
+ eos_token="__end__",
87
+ unk_token="__unk__",
88
+ pad_token="__null__",
89
+ **kwargs,
90
+ ):
91
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
92
+ self.encoder = json.load(vocab_handle)
93
+ self.decoder = {v: k for k, v in self.encoder.items()}
94
+ with open(merges_file, encoding="utf-8") as merges_handle:
95
+ merges = merges_handle.read().split("\n")[1:-1]
96
+ merges = [tuple(merge.split()) for merge in merges]
97
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
98
+ self.cache = {}
99
+ super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
100
+
101
+ @property
102
+ def vocab_size(self) -> int:
103
+ return len(self.encoder)
104
+
105
+ def get_vocab(self) -> Dict:
106
+ return dict(self.encoder, **self.added_tokens_encoder)
107
+
108
+ def bpe(self, token: str) -> str:
109
+ if token in self.cache:
110
+ return self.cache[token]
111
+ token = re.sub("([.,!?()])", r" \1", token)
112
+ token = re.sub("(')", r" \1 ", token)
113
+ token = re.sub(r"\s{2,}", " ", token)
114
+ if "\n" in token:
115
+ token = token.replace("\n", " __newln__")
116
+
117
+ tokens = token.split(" ")
118
+ words = []
119
+ for token in tokens:
120
+ if not len(token):
121
+ continue
122
+
123
+ token = token.lower()
124
+ word = tuple(token)
125
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
126
+ pairs = get_pairs(word)
127
+
128
+ if not pairs:
129
+ words.append(token)
130
+ continue
131
+
132
+ while True:
133
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
134
+ if bigram not in self.bpe_ranks:
135
+ break
136
+ first, second = bigram
137
+ new_word = []
138
+ i = 0
139
+
140
+ while i < len(word):
141
+ try:
142
+ j = word.index(first, i)
143
+ new_word.extend(word[i:j])
144
+ i = j
145
+ except ValueError:
146
+ new_word.extend(word[i:])
147
+ break
148
+
149
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
150
+ new_word.append(first + second)
151
+ i += 2
152
+ else:
153
+ new_word.append(word[i])
154
+ i += 1
155
+ new_word = tuple(new_word)
156
+ word = new_word
157
+ if len(word) == 1:
158
+ break
159
+ else:
160
+ pairs = get_pairs(word)
161
+ word = "@@ ".join(word)
162
+ word = word[:-4]
163
+
164
+ self.cache[token] = word
165
+ words.append(word)
166
+ return " ".join(words)
167
+
168
+ def _tokenize(self, text: str) -> List[str]:
169
+ """Split a string into tokens using BPE."""
170
+ split_tokens = []
171
+
172
+ words = re.findall(r"\S+\n?", text)
173
+
174
+ for token in words:
175
+ split_tokens.extend(list(self.bpe(token).split(" ")))
176
+ return split_tokens
177
+
178
+ def _convert_token_to_id(self, token: str) -> int:
179
+ """Converts a token to an id using the vocab."""
180
+ token = token.lower()
181
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
182
+
183
+ def _convert_id_to_token(self, index: int) -> str:
184
+ """Converts an index (integer) in a token (str) using the vocab."""
185
+ return self.decoder.get(index, self.unk_token)
186
+
187
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
188
+ """Converts a sequence of tokens in a single string."""
189
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
190
+ return out_string
191
+
192
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
193
+ if not os.path.isdir(save_directory):
194
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
195
+ return
196
+ vocab_file = os.path.join(
197
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
198
+ )
199
+ merge_file = os.path.join(
200
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
201
+ )
202
+
203
+ with open(vocab_file, "w", encoding="utf-8") as f:
204
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
205
+
206
+ index = 0
207
+ with open(merge_file, "w", encoding="utf-8") as writer:
208
+ writer.write("#version: 0.2\n")
209
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
210
+ if index != token_index:
211
+ logger.warning(
212
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
213
+ " Please check that the tokenizer is not corrupted!"
214
+ )
215
+ index = token_index
216
+ writer.write(" ".join(bpe_tokens) + "\n")
217
+ index += 1
218
+
219
+ return vocab_file, merge_file
220
+
221
+ @property
222
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
223
+ def default_chat_template(self):
224
+ """
225
+ A very simple chat template that just adds whitespace between messages.
226
+ """
227
+ logger.warning_once(
228
+ "\nNo chat template is defined for this tokenizer - using the default template "
229
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
230
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
231
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
232
+ )
233
+ return (
234
+ "{% for message in messages %}"
235
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
236
+ "{{ message['content'] }}"
237
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
238
+ "{% endfor %}"
239
+ "{{ eos_token }}"
240
+ )
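
A short sketch of how the BPE tokenizer added above behaves in practice (illustration only; it assumes the `facebook/blenderbot_small-90M` vocab and merges files can be downloaded, and the exact sub-word splits depend on those files):

```python
# Illustrative round trip with the slow BlenderbotSmall tokenizer defined above.
from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")

text = "My friends are cool but they eat too many carbs."
tokens = tok.tokenize(text)          # lowercased; non-final BPE pieces carry a trailing "@@"
ids = tok.convert_tokens_to_ids(tokens)

print(tokens)
print(tok.convert_tokens_to_string(tokens))  # "@@ " joiners are stripped back out
print(tok.decode(ids))
```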
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py ADDED
@@ -0,0 +1,120 @@
+ # coding=utf-8
+ # Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Fast tokenization class for BlenderbotSmall."""
+ from typing import List, Optional
+
+ from tokenizers import ByteLevelBPETokenizer
+
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
+ from ...utils import logging
+ from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {
+     "vocab_file": "vocab.json",
+     "merges_file": "merges.txt",
+     "tokenizer_config_file": "tokenizer_config.json",
+ }
+
+
+ class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
+     """
+     Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     slow_tokenizer_class = BlenderbotSmallTokenizer
+
+     def __init__(
+         self,
+         vocab_file=None,
+         merges_file=None,
+         unk_token="<|endoftext|>",
+         bos_token="<|endoftext|>",
+         eos_token="<|endoftext|>",
+         add_prefix_space=False,
+         trim_offsets=True,
+         **kwargs,
+     ):
+         super().__init__(
+             ByteLevelBPETokenizer(
+                 vocab=vocab_file,
+                 merges=merges_file,
+                 add_prefix_space=add_prefix_space,
+                 trim_offsets=trim_offsets,
+             ),
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             **kwargs,
+         )
+         self.add_prefix_space = add_prefix_space
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
+         if token_ids_1 is None:
+             return output
+
+         return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall
+         does not make use of token type ids, therefore a list of zeros is returned.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         sep = [self.sep_token_id]
+         cls = [self.cls_token_id]
+
+         if token_ids_1 is None:
+             return len(cls + token_ids_0 + sep) * [0]
+         return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+     @property
+     # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
+     def default_chat_template(self):
+         """
+         A very simple chat template that just adds whitespace between messages.
+         """
+         logger.warning_once(
+             "\nNo chat template is defined for this tokenizer - using the default template "
+             f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+             "your model, please set `tokenizer.chat_template` to an appropriate template. "
+             "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+         )
+         return (
+             "{% for message in messages %}"
+             "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
+             "{{ message['content'] }}"
+             "{% if not loop.last %}{{ ' ' }}{% endif %}"
+             "{% endfor %}"
+             "{{ eos_token }}"
+         )
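
For reference, the special-token layouts produced by `build_inputs_with_special_tokens` above, written out with placeholder ids (illustrative values only, not real vocabulary entries):

```python
# Placeholder ids: bos/eos stand in for the tokenizer's bos_token_id / eos_token_id.
bos, eos = 1, 2
seq_a, seq_b = [10, 11], [20, 21]

single_sequence = [bos] + seq_a + [eos]                        # [1, 10, 11, 2]
sequence_pair = [bos] + seq_a + [eos] + [eos] + seq_b + [eos]  # [1, 10, 11, 2, 2, 20, 21, 2]

# create_token_type_ids_from_sequences returns all zeros for either layout.
token_type_ids = len(sequence_pair) * [0]
```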
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__init__.py ADDED
@@ -0,0 +1,64 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_sentencepiece_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {}
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
+
+
+ if TYPE_CHECKING:
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_nllb import NllbTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_nllb_fast import NllbTokenizerFast
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
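
A small illustration of what the `_LazyModule` wiring above provides (assuming the optional `sentencepiece` and `tokenizers` extras are installed): the tokenizer sub-modules are only imported when their attributes are first accessed.

```python
# Importing the package does not yet import sentencepiece or tokenizers;
# the attribute accesses below trigger the lazy imports registered above.
from transformers.models import nllb

tokenizer_cls = nllb.NllbTokenizer          # resolved via _LazyModule on first access
fast_tokenizer_cls = nllb.NllbTokenizerFast
```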
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (941 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc ADDED
Binary file (18.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc ADDED
Binary file (13.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py ADDED
@@ -0,0 +1,433 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip
34
+
35
+
36
+ class NllbTokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct an NLLB tokenizer.
39
+
40
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
41
+ [SentencePiece](https://github.com/google/sentencepiece).
42
+
43
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
44
+ <tokens> <eos>` for target language documents.
45
+
46
+ Examples:
47
+
48
+ ```python
49
+ >>> from transformers import NllbTokenizer
50
+
51
+ >>> tokenizer = NllbTokenizer.from_pretrained(
52
+ ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
53
+ ... )
54
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
55
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
56
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
57
+ ```
58
+
59
+ Args:
60
+ vocab_file (`str`):
61
+ Path to the vocabulary file.
62
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
63
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
64
+
65
+ <Tip>
66
+
67
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
68
+ sequence. The token used is the `cls_token`.
69
+
70
+ </Tip>
71
+
72
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
73
+ The end of sequence token.
74
+
75
+ <Tip>
76
+
77
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
78
+ The token used is the `sep_token`.
79
+
80
+ </Tip>
81
+
82
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
83
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
84
+ sequence classification or for a text and a question for question answering. It is also used as the last
85
+ token of a sequence built with special tokens.
86
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
87
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
88
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
89
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
90
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
91
+ token instead.
92
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
93
+ The token used for padding, for example when batching sequences of different lengths.
94
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
95
+ The token used for masking values. This is the token used when training this model with masked language
96
+ modeling. This is the token which the model will try to predict.
97
+ tokenizer_file (`str`, *optional*):
98
+ The path to a tokenizer file to use instead of the vocab file.
99
+ src_lang (`str`, *optional*):
100
+ The language to use as source language for translation.
101
+ tgt_lang (`str`, *optional*):
102
+ The language to use as target language for translation.
103
+ sp_model_kwargs (`Dict[str, str]`):
104
+ Additional keyword arguments to pass to the SentencePiece model initialization.
105
+ """
106
+
107
+ vocab_files_names = VOCAB_FILES_NAMES
108
+ model_input_names = ["input_ids", "attention_mask"]
109
+
110
+ prefix_tokens: List[int] = []
111
+ suffix_tokens: List[int] = []
112
+
113
+ def __init__(
114
+ self,
115
+ vocab_file,
116
+ bos_token="<s>",
117
+ eos_token="</s>",
118
+ sep_token="</s>",
119
+ cls_token="<s>",
120
+ unk_token="<unk>",
121
+ pad_token="<pad>",
122
+ mask_token="<mask>",
123
+ tokenizer_file=None,
124
+ src_lang=None,
125
+ tgt_lang=None,
126
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
127
+ additional_special_tokens=None,
128
+ legacy_behaviour=False,
129
+ **kwargs,
130
+ ):
131
+ if additional_special_tokens is None:
132
+ additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
133
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
134
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
135
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
136
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
137
+ # Mask token behaves like a normal word, i.e. includes the space before it
138
+ mask_token = (
139
+ AddedToken(mask_token, normalized=True, lstrip=True, special=True)
140
+ if isinstance(mask_token, str)
141
+ else mask_token
142
+ )
143
+
144
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
145
+ self.legacy_behaviour = legacy_behaviour
146
+
147
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
148
+ self.sp_model.Load(str(vocab_file))
149
+ self.vocab_file = vocab_file
150
+ # Original fairseq vocab and spm vocab must be "aligned":
151
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
152
+ # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
153
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
154
+ # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
155
+
156
+ # unk token needs to be in the vocab with correct index
157
+ self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token}
158
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
159
+ self.fairseq_offset = 1
160
+ self.sp_model_size = len(self.sp_model)
161
+
162
+ # Everything that follows is kept for BC and will be removed in v4.38
163
+ self._fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
164
+ language_codes = FAIRSEQ_LANGUAGE_CODES if additional_special_tokens is None else additional_special_tokens
165
+ self._lang_code_to_id = {
166
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(language_codes)
167
+ }
168
+ self._id_to_lang_code = {v: k for k, v in self._lang_code_to_id.items()}
169
+ self._fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
170
+
171
+ self._fairseq_tokens_to_ids.update(self.lang_code_to_id)
172
+ self._fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
173
+
174
+ super().__init__(
175
+ bos_token=bos_token,
176
+ eos_token=eos_token,
177
+ unk_token=unk_token,
178
+ sep_token=sep_token,
179
+ cls_token=cls_token,
180
+ pad_token=pad_token,
181
+ mask_token=mask_token,
182
+ tokenizer_file=tokenizer_file,
183
+ src_lang=src_lang,
184
+ tgt_lang=tgt_lang,
185
+ additional_special_tokens=additional_special_tokens,
186
+ sp_model_kwargs=self.sp_model_kwargs,
187
+ legacy_behaviour=legacy_behaviour,
188
+ **kwargs,
189
+ )
190
+
191
+ self._src_lang = src_lang if src_lang is not None else "eng_Latn"
192
+ self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
193
+ self.tgt_lang = tgt_lang
194
+ self.set_src_lang_special_tokens(self._src_lang)
195
+
196
+ def __getstate__(self):
197
+ state = self.__dict__.copy()
198
+ state["sp_model"] = None
199
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
200
+ return state
201
+
202
+ def __setstate__(self, d):
203
+ self.__dict__ = d
204
+
205
+ # for backward compatibility
206
+ if not hasattr(self, "sp_model_kwargs"):
207
+ self.sp_model_kwargs = {}
208
+
209
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
210
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
211
+
212
+ @property
213
+ def vocab_size(self):
214
+ return len(self.sp_model) + self.fairseq_offset
215
+
216
+ @property
217
+ def src_lang(self) -> str:
218
+ return self._src_lang
219
+
220
+ @property
221
+ def lang_code_to_id(self):
222
+ logger.warning_once(
223
+ "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
224
+ " this attribute will be removed in `transformers` v4.38"
225
+ )
226
+ return self._lang_code_to_id
227
+
228
+ @property
229
+ def fairseq_tokens_to_ids(self):
230
+ logger.warning_once(
231
+ "the `fairseq_tokens_to_ids` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
232
+ " this attribute will be removed in `transformers` v4.38"
233
+ )
234
+ return self._fairseq_tokens_to_ids
235
+
236
+ @property
237
+ def id_to_lang_code(self):
238
+ logger.warning_once(
239
+ "the `id_to_lang_code` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
240
+ " this attribute will be removed in `transformers` v4.38"
241
+ )
242
+ return self._id_to_lang_code
243
+
244
+ @property
245
+ def fairseq_ids_to_tokens(self):
246
+ logger.warning_once(
247
+ "the `fairseq_ids_to_tokens` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
248
+ " this attribute will be removed in `transformers` v4.38"
249
+ )
250
+ return self._fairseq_ids_to_tokens
251
+
252
+ @src_lang.setter
253
+ def src_lang(self, new_src_lang: str) -> None:
254
+ self._src_lang = new_src_lang
255
+ self.set_src_lang_special_tokens(self._src_lang)
256
+
257
+ def get_special_tokens_mask(
258
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
259
+ ) -> List[int]:
260
+ """
261
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
262
+ special tokens using the tokenizer `prepare_for_model` method.
263
+
264
+ Args:
265
+ token_ids_0 (`List[int]`):
266
+ List of IDs.
267
+ token_ids_1 (`List[int]`, *optional*):
268
+ Optional second list of IDs for sequence pairs.
269
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
270
+ Whether or not the token list is already formatted with special tokens for the model.
271
+
272
+ Returns:
273
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
274
+ """
275
+
276
+ if already_has_special_tokens:
277
+ return super().get_special_tokens_mask(
278
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
279
+ )
280
+
281
+ prefix_ones = [1] * len(self.prefix_tokens)
282
+ suffix_ones = [1] * len(self.suffix_tokens)
283
+ if token_ids_1 is None:
284
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
285
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
286
+
287
+ def build_inputs_with_special_tokens(
288
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
289
+ ) -> List[int]:
290
+ """
291
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
292
+ adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:
293
+
294
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
295
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
296
+
297
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
298
+ separator.
299
+
300
+ Args:
301
+ token_ids_0 (`List[int]`):
302
+ List of IDs to which the special tokens will be added.
303
+ token_ids_1 (`List[int]`, *optional*):
304
+ Optional second list of IDs for sequence pairs.
305
+
306
+ Returns:
307
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
308
+ """
309
+ if token_ids_1 is None:
310
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
311
+ # We don't expect to process pairs, but leave the pair logic for API consistency
312
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
313
+
314
+ def create_token_type_ids_from_sequences(
315
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
316
+ ) -> List[int]:
317
+ """
318
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
319
+ make use of token type ids, therefore a list of zeros is returned.
320
+
321
+ Args:
322
+ token_ids_0 (`List[int]`):
323
+ List of IDs.
324
+ token_ids_1 (`List[int]`, *optional*):
325
+ Optional second list of IDs for sequence pairs.
326
+
327
+ Returns:
328
+ `List[int]`: List of zeros.
329
+
330
+ """
331
+
332
+ sep = [self.sep_token_id]
333
+ cls = [self.cls_token_id]
334
+
335
+ if token_ids_1 is None:
336
+ return len(cls + token_ids_0 + sep) * [0]
337
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
338
+
339
+ def _build_translation_inputs(
340
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
341
+ ):
342
+ """Used by translation pipeline, to prepare inputs for the generate function"""
343
+ if src_lang is None or tgt_lang is None:
344
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
345
+ self.src_lang = src_lang
346
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
347
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
348
+ inputs["forced_bos_token_id"] = tgt_lang_id
349
+ return inputs
350
+
351
+ def get_vocab(self):
352
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
353
+ vocab.update(self.added_tokens_encoder)
354
+ return vocab
355
+
356
+ def _tokenize(self, text: str) -> List[str]:
357
+ return self.sp_model.encode(text, out_type=str)
358
+
359
+ def _convert_token_to_id(self, token):
360
+ """Converts a token (str) in an id using the vocab."""
361
+ spm_id = self.sp_model.PieceToId(token)
362
+ # Need to return unknown token if the SP model returned 0
363
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
364
+
365
+ def _convert_id_to_token(self, index):
366
+ """Converts an index (integer) in a token (str) using the vocab."""
367
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
368
+
369
+ def convert_tokens_to_string(self, tokens):
370
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
371
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
372
+ return out_string
373
+
374
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
375
+ if not os.path.isdir(save_directory):
376
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
377
+ return
378
+ out_vocab_file = os.path.join(
379
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
380
+ )
381
+
382
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
383
+ copyfile(self.vocab_file, out_vocab_file)
384
+ elif not os.path.isfile(self.vocab_file):
385
+ with open(out_vocab_file, "wb") as fi:
386
+ content_spiece_model = self.sp_model.serialized_model_proto()
387
+ fi.write(content_spiece_model)
388
+
389
+ return (out_vocab_file,)
390
+
391
+ def prepare_seq2seq_batch(
392
+ self,
393
+ src_texts: List[str],
394
+ src_lang: str = "eng_Latn",
395
+ tgt_texts: Optional[List[str]] = None,
396
+ tgt_lang: str = "fra_Latn",
397
+ **kwargs,
398
+ ) -> BatchEncoding:
399
+ self.src_lang = src_lang
400
+ self.tgt_lang = tgt_lang
401
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
402
+
403
+ def _switch_to_input_mode(self):
404
+ return self.set_src_lang_special_tokens(self.src_lang)
405
+
406
+ def _switch_to_target_mode(self):
407
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
408
+
409
+ def set_src_lang_special_tokens(self, src_lang) -> None:
410
+ """Reset the special tokens to the source lang setting.
411
+ - In legacy mode: No prefix and suffix=[eos, src_lang_code].
412
+ - In default mode: Prefix=[src_lang_code], suffix = [eos]
413
+ """
414
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
415
+ if self.legacy_behaviour:
416
+ self.prefix_tokens = []
417
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
418
+ else:
419
+ self.prefix_tokens = [self.cur_lang_code]
420
+ self.suffix_tokens = [self.eos_token_id]
421
+
422
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
423
+ """Reset the special tokens to the target lang setting.
424
+ - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
425
+ - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
426
+ """
427
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
428
+ if self.legacy_behaviour:
429
+ self.prefix_tokens = []
430
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
431
+ else:
432
+ self.prefix_tokens = [self.cur_lang_code]
433
+ self.suffix_tokens = [self.eos_token_id]
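As a quick illustration of the prefix/suffix logic in `set_src_lang_special_tokens` above, the sketch below (not part of the committed file) shows where the language code and `</s>` land in default versus legacy mode; the exact subword pieces are indicative only and loading the checkpoint requires network access.

```python
# Hedged sketch of the special-token layout produced by set_src_lang_special_tokens.
from transformers import NllbTokenizer

tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
print(tok.convert_ids_to_tokens(tok("UN Chief").input_ids))
# default mode: ['eng_Latn', ..., '</s>']  (prefix=[src_lang_code], suffix=[eos])

legacy = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", legacy_behaviour=True
)
print(legacy.convert_ids_to_tokens(legacy("UN Chief").input_ids))
# legacy mode: [..., '</s>', 'eng_Latn']  (prefix=[], suffix=[eos, src_lang_code])
```
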
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb_fast.py ADDED
@@ -0,0 +1,340 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_nllb import NllbTokenizer
29
+ else:
30
+ NllbTokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
37
+
38
+
39
+ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip
40
+
41
+
42
+ class NllbTokenizerFast(PreTrainedTokenizerFast):
43
+ """
44
+ Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on
45
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
46
+
47
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
48
+ refer to this superclass for more information regarding those methods.
49
+
50
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
51
+ <tokens> <eos>` for target language documents.
52
+
53
+ Examples:
54
+
55
+ ```python
56
+ >>> from transformers import NllbTokenizerFast
57
+
58
+ >>> tokenizer = NllbTokenizerFast.from_pretrained(
59
+ ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
60
+ ... )
61
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
62
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
63
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
64
+ ```
65
+
66
+ Args:
67
+ vocab_file (`str`):
68
+ Path to the vocabulary file.
69
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
70
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
71
+
72
+ <Tip>
73
+
74
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
75
+ sequence. The token used is the `cls_token`.
76
+
77
+ </Tip>
78
+
79
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
80
+ The end of sequence token.
81
+
82
+ <Tip>
83
+
84
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
85
+ The token used is the `sep_token`.
86
+
87
+ </Tip>
88
+
89
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
90
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
91
+ sequence classification or for a text and a question for question answering. It is also used as the last
92
+ token of a sequence built with special tokens.
93
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
94
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
95
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
96
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
97
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
98
+ token instead.
99
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
100
+ The token used for padding, for example when batching sequences of different lengths.
101
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
102
+ The token used for masking values. This is the token used when training this model with masked language
103
+ modeling. This is the token which the model will try to predict.
104
+ tokenizer_file (`str`, *optional*):
105
+ The path to a tokenizer file to use instead of the vocab file.
106
+ src_lang (`str`, *optional*):
107
+ The language to use as source language for translation.
108
+ tgt_lang (`str`, *optional*):
109
+ The language to use as target language for translation.
110
+ """
111
+
112
+ vocab_files_names = VOCAB_FILES_NAMES
113
+ model_input_names = ["input_ids", "attention_mask"]
114
+ slow_tokenizer_class = NllbTokenizer
115
+
116
+ prefix_tokens: List[int] = []
117
+ suffix_tokens: List[int] = []
118
+
119
+ def __init__(
120
+ self,
121
+ vocab_file=None,
122
+ tokenizer_file=None,
123
+ bos_token="<s>",
124
+ eos_token="</s>",
125
+ sep_token="</s>",
126
+ cls_token="<s>",
127
+ unk_token="<unk>",
128
+ pad_token="<pad>",
129
+ mask_token="<mask>",
130
+ src_lang=None,
131
+ tgt_lang=None,
132
+ additional_special_tokens=None,
133
+ legacy_behaviour=False,
134
+ **kwargs,
135
+ ):
136
+ if additional_special_tokens is None:
137
+ additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
138
+
139
+ self.vocab_file = vocab_file
140
+ # Mask token behaves like a normal word, i.e. includes the space before it
141
+ mask_token = (
142
+ AddedToken(mask_token, normalized=True, lstrip=True, special=True)
143
+ if isinstance(mask_token, str)
144
+ else mask_token
145
+ )
146
+ self.legacy_behaviour = legacy_behaviour
147
+ super().__init__(
148
+ vocab_file=vocab_file,
149
+ tokenizer_file=tokenizer_file,
150
+ bos_token=bos_token,
151
+ eos_token=eos_token,
152
+ sep_token=sep_token,
153
+ cls_token=cls_token,
154
+ unk_token=unk_token,
155
+ pad_token=pad_token,
156
+ src_lang=src_lang,
157
+ tgt_lang=tgt_lang,
158
+ mask_token=mask_token,
159
+ additional_special_tokens=additional_special_tokens,
160
+ legacy_behaviour=legacy_behaviour,
161
+ **kwargs,
162
+ )
163
+
164
+ self._lang_code_to_id = {
165
+ lang_code: self.convert_tokens_to_ids(str(lang_code)) for lang_code in additional_special_tokens
166
+ }
167
+
168
+ self._src_lang = src_lang if src_lang is not None else "eng_Latn"
169
+ self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
170
+ self.tgt_lang = tgt_lang
171
+ self.set_src_lang_special_tokens(self._src_lang)
172
+
173
+ @property
174
+ def lang_code_to_id(self):
175
+ logger.warning_once(
176
+ "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`"
177
+ " this attribute will be removed in `transformers` v4.38"
178
+ )
179
+ return self._lang_code_to_id
180
+
181
+ @property
182
+ def can_save_slow_tokenizer(self) -> bool:
183
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
184
+
185
+ @property
186
+ def src_lang(self) -> str:
187
+ return self._src_lang
188
+
189
+ @src_lang.setter
190
+ def src_lang(self, new_src_lang: str) -> None:
191
+ self._src_lang = new_src_lang
192
+ self.set_src_lang_special_tokens(self._src_lang)
193
+
194
+ def build_inputs_with_special_tokens(
195
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
196
+ ) -> List[int]:
197
+ """
198
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
199
+ adding special tokens. The special tokens depend on calling set_lang.
200
+
201
+ An NLLB sequence has the following format, where `X` represents the sequence:
202
+
203
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
204
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
205
+
206
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
207
+ separator.
208
+
209
+ Args:
210
+ token_ids_0 (`List[int]`):
211
+ List of IDs to which the special tokens will be added.
212
+ token_ids_1 (`List[int]`, *optional*):
213
+ Optional second list of IDs for sequence pairs.
214
+
215
+ Returns:
216
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
217
+ """
218
+ if token_ids_1 is None:
219
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
220
+ # We don't expect to process pairs, but leave the pair logic for API consistency
221
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
222
+
223
+ def create_token_type_ids_from_sequences(
224
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
225
+ ) -> List[int]:
226
+ """
227
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
228
+ make use of token type ids, therefore a list of zeros is returned.
229
+
230
+ Args:
231
+ token_ids_0 (`List[int]`):
232
+ List of IDs.
233
+ token_ids_1 (`List[int]`, *optional*):
234
+ Optional second list of IDs for sequence pairs.
235
+
236
+ Returns:
237
+ `List[int]`: List of zeros.
238
+
239
+ """
240
+
241
+ sep = [self.sep_token_id]
242
+ cls = [self.cls_token_id]
243
+
244
+ if token_ids_1 is None:
245
+ return len(cls + token_ids_0 + sep) * [0]
246
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
247
+
248
+ def _build_translation_inputs(
249
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
250
+ ):
251
+ """Used by translation pipeline, to prepare inputs for the generate function"""
252
+ if src_lang is None or tgt_lang is None:
253
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
254
+ self.src_lang = src_lang
255
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
256
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
257
+ inputs["forced_bos_token_id"] = tgt_lang_id
258
+ return inputs
259
+
260
+ def prepare_seq2seq_batch(
261
+ self,
262
+ src_texts: List[str],
263
+ src_lang: str = "eng_Latn",
264
+ tgt_texts: Optional[List[str]] = None,
265
+ tgt_lang: str = "fra_Latn",
266
+ **kwargs,
267
+ ) -> BatchEncoding:
268
+ self.src_lang = src_lang
269
+ self.tgt_lang = tgt_lang
270
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
271
+
272
+ def _switch_to_input_mode(self):
273
+ return self.set_src_lang_special_tokens(self.src_lang)
274
+
275
+ def _switch_to_target_mode(self):
276
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
277
+
278
+ def set_src_lang_special_tokens(self, src_lang) -> None:
279
+ """Reset the special tokens to the source lang setting.
280
+ - In legacy mode: No prefix and suffix=[eos, src_lang_code].
281
+ - In default mode: Prefix=[src_lang_code], suffix = [eos]
282
+ """
283
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
284
+
285
+ if self.legacy_behaviour:
286
+ self.prefix_tokens = []
287
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
288
+ else:
289
+ self.prefix_tokens = [self.cur_lang_code]
290
+ self.suffix_tokens = [self.eos_token_id]
291
+
292
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
293
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
294
+
295
+ self._tokenizer.post_processor = processors.TemplateProcessing(
296
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
297
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
298
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
299
+ )
300
+
301
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
302
+ """Reset the special tokens to the target lang setting.
303
+ - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
304
+ - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
305
+ """
306
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
307
+ if self.legacy_behaviour:
308
+ self.prefix_tokens = []
309
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
310
+ else:
311
+ self.prefix_tokens = [self.cur_lang_code]
312
+ self.suffix_tokens = [self.eos_token_id]
313
+
314
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
315
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
316
+
317
+ self._tokenizer.post_processor = processors.TemplateProcessing(
318
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
319
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
320
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
321
+ )
322
+
323
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
324
+ if not self.can_save_slow_tokenizer:
325
+ raise ValueError(
326
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
327
+ "tokenizer."
328
+ )
329
+
330
+ if not os.path.isdir(save_directory):
331
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
332
+ return
333
+ out_vocab_file = os.path.join(
334
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
335
+ )
336
+
337
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
338
+ copyfile(self.vocab_file, out_vocab_file)
339
+
340
+ return (out_vocab_file,)
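For reference, here is a hedged sketch (not part of the committed file) of how the translation pipeline uses the `_build_translation_inputs` helper defined earlier in this file: it tokenizes with the source-language special tokens and returns the target-language id as `forced_bos_token_id` for `generate()`. Using `return_tensors="pt"` assumes torch is installed.

```python
# Hedged sketch of the translation-pipeline helper defined earlier in this file.
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M")
inputs = tok._build_translation_inputs(
    "UN Chief Says There Is No Military Solution in Syria",
    return_tensors="pt",
    src_lang="eng_Latn",
    tgt_lang="fra_Latn",
)
# forced_bos_token_id is the id of the 'fra_Latn' language-code token; a seq2seq
# model's generate() receives it so that decoding starts with the target language.
print(inputs["forced_bos_token_id"] == tok.convert_tokens_to_ids("fra_Latn"))  # True
```
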
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__init__.py ADDED
@@ -0,0 +1,81 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
26
+
27
+ try:
28
+ if not is_sentencepiece_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
34
+
35
+ try:
36
+ if not is_torch_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_plbart"] = [
42
+ "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
43
+ "PLBartForCausalLM",
44
+ "PLBartForConditionalGeneration",
45
+ "PLBartForSequenceClassification",
46
+ "PLBartModel",
47
+ "PLBartPreTrainedModel",
48
+ ]
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
53
+
54
+ try:
55
+ if not is_sentencepiece_available():
56
+ raise OptionalDependencyNotAvailable()
57
+ except OptionalDependencyNotAvailable:
58
+ pass
59
+ else:
60
+ from .tokenization_plbart import PLBartTokenizer
61
+
62
+ try:
63
+ if not is_torch_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .modeling_plbart import (
69
+ PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
70
+ PLBartForCausalLM,
71
+ PLBartForConditionalGeneration,
72
+ PLBartForSequenceClassification,
73
+ PLBartModel,
74
+ PLBartPreTrainedModel,
75
+ )
76
+
77
+
78
+ else:
79
+ import sys
80
+
81
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
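The same optional-dependency pattern applies here: the configuration is always importable, while the tokenizer and model classes only resolve when `sentencepiece` and `torch` are available. A small illustrative sketch, not part of the committed file:

```python
# Illustrative sketch of the optional-dependency guards declared above.
from transformers import PLBartConfig
from transformers.utils import is_sentencepiece_available, is_torch_available

config = PLBartConfig()  # always importable, no optional dependency needed
if is_sentencepiece_available():
    from transformers import PLBartTokenizer  # noqa: F401
if is_torch_available():
    from transformers import PLBartModel
    model = PLBartModel(config)  # randomly initialized weights
```
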
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.22 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/configuration_plbart.cpython-310.pyc ADDED
Binary file (7.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/convert_plbart_original_checkpoint_to_torch.cpython-310.pyc ADDED
Binary file (2.57 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/modeling_plbart.cpython-310.pyc ADDED
Binary file (56.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/tokenization_plbart.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/configuration_plbart.py ADDED
@@ -0,0 +1,192 @@
1
+ # coding=utf-8
2
+ # Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PLBART model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfigWithPast
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class PLBartConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate a
33
+ PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration
34
+ with the defaults will yield a similar configuration to that of the PLBART
35
+ [uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 50005):
43
+ Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the
44
+ `input_ids` passed when calling [`PLBartModel`].
45
+ d_model (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the layers and the pooler layer.
47
+ encoder_layers (`int`, *optional*, defaults to 6):
48
+ Number of encoder layers.
49
+ decoder_layers (`int`, *optional*, defaults to 6):
50
+ Number of decoder layers.
51
+ encoder_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ decoder_attention_heads (`int`, *optional*, defaults to 12):
54
+ Number of attention heads for each attention layer in the Transformer decoder.
55
+ decoder_ffn_dim (`int`, *optional*, defaults to 3072):
56
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
57
+ encoder_ffn_dim (`int`, *optional*, defaults to 3072):
58
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
59
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
60
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
61
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
62
+ dropout (`float`, *optional*, defaults to 0.1):
63
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
64
+ attention_dropout (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the attention probabilities.
66
+ activation_dropout (`float`, *optional*, defaults to 0.0):
67
+ The dropout ratio for activations inside the fully connected layer.
68
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
69
+ The dropout ratio for the classifier.
70
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
71
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
72
+ just in case (e.g., 512 or 1024 or 2048).
73
+ init_std (`float`, *optional*, defaults to 0.02):
74
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
75
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
76
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
77
+ for more details.
78
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
79
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
80
+ for more details.
81
+ scale_embedding (`bool`, *optional*, defaults to `True`):
82
+ Scale embeddings by dividing by sqrt(d_model).
83
+ use_cache (`bool`, *optional*, defaults to `True`):
84
+ Whether or not the model should return the last key/values attentions (not used by all models).
85
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
86
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
87
+ `eos_token_id`.
88
+
89
+ Example:
90
+
91
+ ```python
92
+ >>> from transformers import PLBartConfig, PLBartModel
93
+
94
+ >>> # Initializing a PLBART uclanlp/plbart-base style configuration
95
+ >>> configuration = PLBartConfig()
96
+
97
+ >>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration
98
+ >>> model = PLBartModel(configuration)
99
+
100
+ >>> # Accessing the model configuration
101
+ >>> configuration = model.config
102
+ ```"""
103
+
104
+ model_type = "plbart"
105
+ keys_to_ignore_at_inference = ["past_key_values"]
106
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=50005,
111
+ max_position_embeddings=1024,
112
+ encoder_layers=6,
113
+ encoder_ffn_dim=3072,
114
+ encoder_attention_heads=12,
115
+ decoder_layers=6,
116
+ decoder_ffn_dim=3072,
117
+ decoder_attention_heads=12,
118
+ encoder_layerdrop=0.0,
119
+ decoder_layerdrop=0.0,
120
+ use_cache=True,
121
+ is_encoder_decoder=True,
122
+ activation_function="gelu",
123
+ d_model=768,
124
+ dropout=0.1,
125
+ attention_dropout=0.1,
126
+ activation_dropout=0.0,
127
+ init_std=0.02,
128
+ classifier_dropout=0.0,
129
+ scale_embedding=True,
130
+ pad_token_id=1,
131
+ bos_token_id=0,
132
+ eos_token_id=2,
133
+ forced_eos_token_id=2,
134
+ **kwargs,
135
+ ):
136
+ self.vocab_size = vocab_size
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.d_model = d_model
139
+ self.encoder_ffn_dim = encoder_ffn_dim
140
+ self.encoder_layers = encoder_layers
141
+ self.encoder_attention_heads = encoder_attention_heads
142
+ self.decoder_ffn_dim = decoder_ffn_dim
143
+ self.decoder_layers = decoder_layers
144
+ self.decoder_attention_heads = decoder_attention_heads
145
+ self.dropout = dropout
146
+ self.attention_dropout = attention_dropout
147
+ self.activation_dropout = activation_dropout
148
+ self.activation_function = activation_function
149
+ self.init_std = init_std
150
+ self.encoder_layerdrop = encoder_layerdrop
151
+ self.decoder_layerdrop = decoder_layerdrop
152
+ self.classifier_dropout = classifier_dropout
153
+ self.use_cache = use_cache
154
+ self.num_hidden_layers = encoder_layers
155
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
156
+ super().__init__(
157
+ pad_token_id=pad_token_id,
158
+ bos_token_id=bos_token_id,
159
+ eos_token_id=eos_token_id,
160
+ is_encoder_decoder=is_encoder_decoder,
161
+ forced_eos_token_id=forced_eos_token_id,
162
+ **kwargs,
163
+ )
164
+
165
+
166
+ class PLBartOnnxConfig(OnnxConfigWithPast):
167
+ @property
168
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
169
+ return OrderedDict(
170
+ [
171
+ ("input_ids", {0: "batch", 1: "sequence"}),
172
+ ("attention_mask", {0: "batch", 1: "sequence"}),
173
+ ]
174
+ )
175
+
176
+ @property
177
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
178
+ if self.use_past:
179
+ return OrderedDict(
180
+ [
181
+ ("last_hidden_state", {0: "batch", 1: "sequence"}),
182
+ ("past_keys", {0: "batch", 2: "sequence"}),
183
+ ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
184
+ ]
185
+ )
186
+ else:
187
+ return OrderedDict(
188
+ [
189
+ ("last_hidden_state", {0: "batch", 1: "sequence"}),
190
+ ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
191
+ ]
192
+ )
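A short hedged example (not part of the committed file) of overriding a few of the defaults documented above to build a smaller, randomly initialized PLBART model; the sizes chosen here are illustrative only and the model classes require torch.

```python
# Hedged example: a deliberately small PLBartConfig (illustrative sizes, requires torch).
from transformers import PLBartConfig, PLBartForConditionalGeneration

config = PLBartConfig(
    encoder_layers=2,
    decoder_layers=2,
    d_model=256,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=512,
    decoder_ffn_dim=512,
)
model = PLBartForConditionalGeneration(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```
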
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py ADDED
@@ -0,0 +1,94 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+ from transformers import PLBartConfig, PLBartForConditionalGeneration, PLBartForSequenceClassification
21
+
22
+
23
+ def remove_ignore_keys_(state_dict):
24
+ ignore_keys = [
25
+ "encoder.version",
26
+ "decoder.version",
27
+ "model.encoder.version",
28
+ "model.decoder.version",
29
+ "_float_tensor",
30
+ "decoder.output_projection.weight",
31
+ ]
32
+ for k in ignore_keys:
33
+ state_dict.pop(k, None)
34
+
35
+
36
+ def make_linear_from_emb(emb):
37
+ vocab_size, emb_size = emb.weight.shape
38
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
39
+ lin_layer.weight.data = emb.weight.data
40
+ return lin_layer
41
+
42
+
43
+ def convert_fairseq_plbart_checkpoint_from_disk(
44
+ checkpoint_path, hf_config_path="uclanlp/plbart-base", finetuned=False, classification=False
45
+ ):
46
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
47
+ remove_ignore_keys_(state_dict)
48
+ vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
49
+
50
+ plbart_config = PLBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
51
+
52
+ state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
53
+ if not classification:
54
+ model = PLBartForConditionalGeneration(plbart_config)
55
+ model.model.load_state_dict(state_dict)
56
+ if finetuned:
57
+ model.lm_head = make_linear_from_emb(model.model.shared)
58
+
59
+ else:
60
+ classification_head = {}
61
+ for key, value in state_dict.copy().items():
62
+ if key.startswith("classification_heads.sentence_classification_head"):
63
+ classification_head[key.replace("classification_heads.sentence_classification_head.", "")] = value
64
+ state_dict.pop(key)
65
+ model = PLBartForSequenceClassification(plbart_config)
66
+ model.model.load_state_dict(state_dict)
67
+ model.classification_head.load_state_dict(classification_head)
68
+
69
+ return model
70
+
71
+
72
+ if __name__ == "__main__":
73
+ parser = argparse.ArgumentParser()
74
+ # Required parameters
75
+ parser.add_argument("fairseq_path", type=str, help="model.pt on local filesystem.")
76
+ parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
77
+ parser.add_argument(
78
+ "--hf_config",
79
+ default="uclanlp/plbart-base",
80
+ type=str,
81
+ help="Which huggingface architecture to use: plbart-base",
82
+ )
83
+ parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
84
+ parser.add_argument(
85
+ "--classification", action="store_true", help="whether the model is a classification checkpoint"
86
+ )
87
+ args = parser.parse_args()
88
+ model = convert_fairseq_plbart_checkpoint_from_disk(
89
+ args.fairseq_path,
90
+ hf_config_path=args.hf_config,
91
+ finetuned=args.finetuned,
92
+ classification=args.classification,
93
+ )
94
+ model.save_pretrained(args.pytorch_dump_folder_path)
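A hedged usage sketch (not part of the committed file) of the conversion helper above; the checkpoint path and output directory below are placeholders for local files, not real artifacts.

```python
# Hedged usage sketch of convert_fairseq_plbart_checkpoint_from_disk; paths are placeholders.
from transformers.models.plbart.convert_plbart_original_checkpoint_to_torch import (
    convert_fairseq_plbart_checkpoint_from_disk,
)

model = convert_fairseq_plbart_checkpoint_from_disk(
    "plbart_base_model.pt",               # placeholder: local fairseq checkpoint (model.pt)
    hf_config_path="uclanlp/plbart-base",
    finetuned=False,
    classification=False,
)
model.save_pretrained("plbart-base-converted")  # placeholder output directory
```
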
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/modeling_plbart.py ADDED
@@ -0,0 +1,1765 @@
1
+ # coding=utf-8
2
+ # Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch PLBART model."""
16
+ import copy
17
+ import math
18
+ from typing import Any, Dict, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_attn_mask_utils import (
27
+ _prepare_4d_attention_mask,
28
+ _prepare_4d_attention_mask_for_sdpa,
29
+ _prepare_4d_causal_attention_mask,
30
+ _prepare_4d_causal_attention_mask_for_sdpa,
31
+ )
32
+ from ...modeling_outputs import (
33
+ BaseModelOutput,
34
+ BaseModelOutputWithPastAndCrossAttentions,
35
+ CausalLMOutputWithCrossAttentions,
36
+ Seq2SeqLMOutput,
37
+ Seq2SeqModelOutput,
38
+ Seq2SeqSequenceClassifierOutput,
39
+ )
40
+ from ...modeling_utils import PreTrainedModel
41
+ from ...utils import (
42
+ add_code_sample_docstrings,
43
+ add_end_docstrings,
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ logging,
47
+ replace_return_docstrings,
48
+ )
49
+ from .configuration_plbart import PLBartConfig
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CHECKPOINT_FOR_DOC = "uclanlp/plbart-base"
55
+ _CONFIG_FOR_DOC = "PLBartConfig"
56
+
57
+
58
+ from ..deprecated._archive_maps import PLBART_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ # Copied from transformers.models.mbart.modeling_mbart.shift_tokens_right
62
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int):
63
+ """
64
+ Shift input ids one token to the right, and wrap the last non-pad token (the <LID> token). Note that PLBart, like
65
+ MBart, does not have a single `decoder_start_token_id` in contrast to other Bart-like models.
66
+ """
67
+ prev_output_tokens = input_ids.clone()
68
+
69
+ if pad_token_id is None:
70
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
71
+ # replace possible -100 values in labels by `pad_token_id`
72
+ prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)
73
+
74
+ index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
75
+ decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
76
+ prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
77
+ prev_output_tokens[:, 0] = decoder_start_tokens
78
+
79
+ return prev_output_tokens
80
+
81
+
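+ # Worked example (comment only, illustrative values): with pad_token_id = 1 and the language id token
+ # 50003 as the last non-pad token,
+ #   input_ids           = [[11, 22, 33, 2, 50003, 1, 1]]
+ #   shift_tokens_right -> [[50003, 11, 22, 33, 2, 50003, 1]]
+ # i.e. every token moves one position to the right and the trailing <LID> token is wrapped around to
+ # become the decoder start token.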
82
+ # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->PLBart
83
+ class PLBartLearnedPositionalEmbedding(nn.Embedding):
84
+ """
85
+ This module learns positional embeddings up to a fixed maximum size.
86
+ """
87
+
88
+ def __init__(self, num_embeddings: int, embedding_dim: int):
89
+ # PLBart is set up so that if padding_idx is specified, the embedding ids are offset by 2
90
+ # and num_embeddings is adjusted accordingly. Other models don't have this hack.
91
+ self.offset = 2
92
+ super().__init__(num_embeddings + self.offset, embedding_dim)
93
+
94
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
95
+ """`input_ids' shape is expected to be [bsz x seqlen]."""
96
+
97
+ bsz, seq_len = input_ids.shape[:2]
98
+ positions = torch.arange(
99
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
100
+ ).expand(bsz, -1)
101
+
102
+ return super().forward(positions + self.offset)
103
+
104
+
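+ # Shape/offset sketch (comment only): with `max_position_embeddings=1024` the embedding table has
+ # 1024 + 2 rows; a batch of shape (bsz, seq_len) with `past_key_values_length=p` looks up rows
+ # p + 2 ... p + seq_len + 1, i.e. every position id is shifted by `self.offset == 2`.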
105
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PLBart
106
+ class PLBartAttention(nn.Module):
107
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
108
+
109
+ def __init__(
110
+ self,
111
+ embed_dim: int,
112
+ num_heads: int,
113
+ dropout: float = 0.0,
114
+ is_decoder: bool = False,
115
+ bias: bool = True,
116
+ is_causal: bool = False,
117
+ config: Optional[PLBartConfig] = None,
118
+ ):
119
+ super().__init__()
120
+ self.embed_dim = embed_dim
121
+ self.num_heads = num_heads
122
+ self.dropout = dropout
123
+ self.head_dim = embed_dim // num_heads
124
+ self.config = config
125
+
126
+ if (self.head_dim * num_heads) != self.embed_dim:
127
+ raise ValueError(
128
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
129
+ f" and `num_heads`: {num_heads})."
130
+ )
131
+ self.scaling = self.head_dim**-0.5
132
+ self.is_decoder = is_decoder
133
+ self.is_causal = is_causal
134
+
135
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
136
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
137
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
138
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
139
+
140
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
141
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
142
+
143
+ def forward(
144
+ self,
145
+ hidden_states: torch.Tensor,
146
+ key_value_states: Optional[torch.Tensor] = None,
147
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
148
+ attention_mask: Optional[torch.Tensor] = None,
149
+ layer_head_mask: Optional[torch.Tensor] = None,
150
+ output_attentions: bool = False,
151
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
152
+ """Input shape: Batch x Time x Channel"""
153
+
154
+ # if key_value_states are provided this layer is used as a cross-attention layer
155
+ # for the decoder
156
+ is_cross_attention = key_value_states is not None
157
+
158
+ bsz, tgt_len, _ = hidden_states.size()
159
+
160
+ # get query proj
161
+ query_states = self.q_proj(hidden_states) * self.scaling
162
+ # get key, value proj
163
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
164
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
165
+ # the provided `key_value_states` to support prefix tuning
166
+ if (
167
+ is_cross_attention
168
+ and past_key_value is not None
169
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
170
+ ):
171
+ # reuse k,v, cross_attentions
172
+ key_states = past_key_value[0]
173
+ value_states = past_key_value[1]
174
+ elif is_cross_attention:
175
+ # cross_attentions
176
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
177
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
178
+ elif past_key_value is not None:
179
+ # reuse k, v, self_attention
180
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
181
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
182
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
183
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
184
+ else:
185
+ # self_attention
186
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
187
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
188
+
189
+ if self.is_decoder:
190
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
191
+ # Further calls to cross_attention layer can then reuse all cross-attention
192
+ # key/value_states (first "if" case)
193
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
194
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
195
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
196
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
197
+ past_key_value = (key_states, value_states)
198
+
199
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
200
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
201
+ key_states = key_states.reshape(*proj_shape)
202
+ value_states = value_states.reshape(*proj_shape)
203
+
204
+ src_len = key_states.size(1)
205
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
206
+
207
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
208
+ raise ValueError(
209
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
210
+ f" {attn_weights.size()}"
211
+ )
212
+
213
+ if attention_mask is not None:
214
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
215
+ raise ValueError(
216
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
217
+ )
218
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
219
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
220
+
221
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
222
+
223
+ if layer_head_mask is not None:
224
+ if layer_head_mask.size() != (self.num_heads,):
225
+ raise ValueError(
226
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
227
+ f" {layer_head_mask.size()}"
228
+ )
229
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
230
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
231
+
232
+ if output_attentions:
233
+ # this operation is a bit awkward, but it's required to
234
+ # make sure that attn_weights keeps its gradient.
235
+ # In order to do so, attn_weights have to be reshaped
236
+ # twice and have to be reused in the following
237
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
238
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
239
+ else:
240
+ attn_weights_reshaped = None
241
+
242
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
243
+
244
+ attn_output = torch.bmm(attn_probs, value_states)
245
+
246
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
247
+ raise ValueError(
248
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
249
+ f" {attn_output.size()}"
250
+ )
251
+
252
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
253
+ attn_output = attn_output.transpose(1, 2)
254
+
255
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
256
+ # partitioned across GPUs when using tensor-parallelism.
257
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
258
+
259
+ attn_output = self.out_proj(attn_output)
260
+
261
+ return attn_output, attn_weights_reshaped, past_key_value
262
+
263
+
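+ # Shape walk-through for PLBartAttention.forward (comment only, illustrative numbers): with
+ # bsz=2, tgt_len=5, src_len=7, embed_dim=768, num_heads=12 (head_dim=64):
+ #   query_states -> (2*12, 5, 64), key/value_states -> (2*12, 7, 64)
+ #   attn_weights = bmm(q, k^T)   -> (2*12, 5, 7), softmax over the last dim
+ #   attn_output  = bmm(probs, v) -> (2*12, 5, 64) -> reshaped to (2, 5, 768) -> out_proj
+ # The query is pre-scaled by head_dim**-0.5, so no extra scaling is applied after the matmul.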
264
+ # Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->PLBart, BART->PLBART
265
+ class PLBartEncoderLayer(nn.Module):
266
+ def __init__(self, config: PLBartConfig):
267
+ super().__init__()
268
+ self.embed_dim = config.d_model
269
+
270
+ self.self_attn = PLBART_ATTENTION_CLASSES[config._attn_implementation](
271
+ embed_dim=self.embed_dim,
272
+ num_heads=config.encoder_attention_heads,
273
+ dropout=config.attention_dropout,
274
+ config=config,
275
+ )
276
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
277
+ self.dropout = config.dropout
278
+ self.activation_fn = ACT2FN[config.activation_function]
279
+ self.activation_dropout = config.activation_dropout
280
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
281
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
282
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
283
+
284
+ def forward(
285
+ self,
286
+ hidden_states: torch.FloatTensor,
287
+ attention_mask: torch.FloatTensor,
288
+ layer_head_mask: torch.FloatTensor,
289
+ output_attentions: Optional[bool] = False,
290
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
291
+ """
292
+ Args:
293
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
294
+ attention_mask (`torch.FloatTensor`): attention mask of size
295
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
296
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
297
+ `(encoder_attention_heads,)`.
298
+ output_attentions (`bool`, *optional*):
299
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
300
+ returned tensors for more detail.
301
+ """
302
+ residual = hidden_states
303
+ hidden_states, attn_weights, _ = self.self_attn(
304
+ hidden_states=hidden_states,
305
+ attention_mask=attention_mask,
306
+ layer_head_mask=layer_head_mask,
307
+ output_attentions=output_attentions,
308
+ )
309
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
310
+ hidden_states = residual + hidden_states
311
+ hidden_states = self.self_attn_layer_norm(hidden_states)
312
+
313
+ residual = hidden_states
314
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
315
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
316
+ hidden_states = self.fc2(hidden_states)
317
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
318
+ hidden_states = residual + hidden_states
319
+ hidden_states = self.final_layer_norm(hidden_states)
320
+
321
+ if hidden_states.dtype == torch.float16 and (
322
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
323
+ ):
324
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
325
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
326
+
327
+ outputs = (hidden_states,)
328
+
329
+ if output_attentions:
330
+ outputs += (attn_weights,)
331
+
332
+ return outputs
333
+
334
+
335
+ # TODO: Implement attention with SDPA for PLBart.
336
+ PLBART_ATTENTION_CLASSES = {"eager": PLBartAttention}
337
+
338
+
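+ # Dispatch note (comment only): PLBartEncoderLayer and PLBartDecoderLayer pick their attention module
+ # via PLBART_ATTENTION_CLASSES[config._attn_implementation]. Per the TODO above, only the "eager"
+ # implementation is registered for PLBart at this point.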
339
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->PLBart, BART->PLBART
340
+ class PLBartDecoderLayer(nn.Module):
341
+ def __init__(self, config: PLBartConfig):
342
+ super().__init__()
343
+ self.embed_dim = config.d_model
344
+
345
+ self.self_attn = PLBART_ATTENTION_CLASSES[config._attn_implementation](
346
+ embed_dim=self.embed_dim,
347
+ num_heads=config.decoder_attention_heads,
348
+ dropout=config.attention_dropout,
349
+ is_decoder=True,
350
+ is_causal=True,
351
+ config=config,
352
+ )
353
+ self.dropout = config.dropout
354
+ self.activation_fn = ACT2FN[config.activation_function]
355
+ self.activation_dropout = config.activation_dropout
356
+
357
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
358
+ self.encoder_attn = PLBART_ATTENTION_CLASSES[config._attn_implementation](
359
+ self.embed_dim,
360
+ config.decoder_attention_heads,
361
+ dropout=config.attention_dropout,
362
+ is_decoder=True,
363
+ config=config,
364
+ )
365
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
366
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
367
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
368
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
369
+
370
+ def forward(
371
+ self,
372
+ hidden_states: torch.Tensor,
373
+ attention_mask: Optional[torch.Tensor] = None,
374
+ encoder_hidden_states: Optional[torch.Tensor] = None,
375
+ encoder_attention_mask: Optional[torch.Tensor] = None,
376
+ layer_head_mask: Optional[torch.Tensor] = None,
377
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
378
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
379
+ output_attentions: Optional[bool] = False,
380
+ use_cache: Optional[bool] = True,
381
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
382
+ """
383
+ Args:
384
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
385
+ attention_mask (`torch.FloatTensor`): attention mask of size
386
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
387
+ encoder_hidden_states (`torch.FloatTensor`):
388
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
389
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
390
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
391
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
392
+ `(encoder_attention_heads,)`.
393
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
394
+ size `(decoder_attention_heads,)`.
395
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
396
+ output_attentions (`bool`, *optional*):
397
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
398
+ returned tensors for more detail.
399
+ """
400
+ residual = hidden_states
401
+
402
+ # Self Attention
403
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
404
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
405
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
406
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
407
+ hidden_states=hidden_states,
408
+ past_key_value=self_attn_past_key_value,
409
+ attention_mask=attention_mask,
410
+ layer_head_mask=layer_head_mask,
411
+ output_attentions=output_attentions,
412
+ )
413
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
414
+ hidden_states = residual + hidden_states
415
+ hidden_states = self.self_attn_layer_norm(hidden_states)
416
+
417
+ # Cross-Attention Block
418
+ cross_attn_present_key_value = None
419
+ cross_attn_weights = None
420
+ if encoder_hidden_states is not None:
421
+ residual = hidden_states
422
+
423
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
424
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
425
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
426
+ hidden_states=hidden_states,
427
+ key_value_states=encoder_hidden_states,
428
+ attention_mask=encoder_attention_mask,
429
+ layer_head_mask=cross_attn_layer_head_mask,
430
+ past_key_value=cross_attn_past_key_value,
431
+ output_attentions=output_attentions,
432
+ )
433
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
434
+ hidden_states = residual + hidden_states
435
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
436
+
437
+ # add cross-attn to positions 3,4 of present_key_value tuple
438
+ present_key_value = present_key_value + cross_attn_present_key_value
439
+
440
+ # Fully Connected
441
+ residual = hidden_states
442
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
443
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
444
+ hidden_states = self.fc2(hidden_states)
445
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
446
+ hidden_states = residual + hidden_states
447
+ hidden_states = self.final_layer_norm(hidden_states)
448
+
449
+ outputs = (hidden_states,)
450
+
451
+ if output_attentions:
452
+ outputs += (self_attn_weights, cross_attn_weights)
453
+
454
+ if use_cache:
455
+ outputs += (present_key_value,)
456
+
457
+ return outputs
458
+
459
+
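+ # Output layout note (comment only): PLBartDecoderLayer.forward returns
+ #   (hidden_states,)                               always
+ #   + (self_attn_weights, cross_attn_weights)      if output_attentions
+ #   + (present_key_value,)                         if use_cache
+ # which is why the decoder below reads the cache from layer_outputs[3 if output_attentions else 1].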
460
+ # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->PLBart
461
+ class PLBartClassificationHead(nn.Module):
462
+ """Head for sentence-level classification tasks."""
463
+
464
+ def __init__(
465
+ self,
466
+ input_dim: int,
467
+ inner_dim: int,
468
+ num_classes: int,
469
+ pooler_dropout: float,
470
+ ):
471
+ super().__init__()
472
+ self.dense = nn.Linear(input_dim, inner_dim)
473
+ self.dropout = nn.Dropout(p=pooler_dropout)
474
+ self.out_proj = nn.Linear(inner_dim, num_classes)
475
+
476
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
477
+ hidden_states = self.dropout(hidden_states)
478
+ hidden_states = self.dense(hidden_states)
479
+ hidden_states = torch.tanh(hidden_states)
480
+ hidden_states = self.dropout(hidden_states)
481
+ hidden_states = self.out_proj(hidden_states)
482
+ return hidden_states
483
+
484
+
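+ # Shape sketch (comment only, illustrative sizes): for a 3-way task with d_model=768 the head maps a
+ # pooled representation of shape (batch, 768) through dropout -> dense(768, 768) -> tanh -> dropout
+ # -> out_proj(768, 3), producing logits of shape (batch, 3). It is typically fed the hidden state of
+ # the final <eos> token of the decoder.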
485
+ class PLBartPreTrainedModel(PreTrainedModel):
486
+ config_class = PLBartConfig
487
+ base_model_prefix = "model"
488
+ supports_gradient_checkpointing = True
489
+ _no_split_modules = ["PLBartDecoderLayer", "PLBartEncoderLayer"]
490
+
491
+ def _init_weights(self, module):
492
+ std = self.config.init_std
493
+ if isinstance(module, nn.Linear):
494
+ module.weight.data.normal_(mean=0.0, std=std)
495
+ if module.bias is not None:
496
+ module.bias.data.zero_()
497
+ elif isinstance(module, nn.Embedding):
498
+ module.weight.data.normal_(mean=0.0, std=std)
499
+ if module.padding_idx is not None:
500
+ module.weight.data[module.padding_idx].zero_()
501
+
502
+
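+ # Initialization sketch (comment only): every nn.Linear and nn.Embedding weight is drawn from
+ # N(0, config.init_std); Linear biases are zeroed, and for embeddings the row at padding_idx is
+ # zeroed so the pad token contributes nothing to the input representation.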
503
+ PLBART_START_DOCSTRING = r"""
504
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
505
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
506
+ etc.)
507
+
508
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
509
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
510
+ and behavior.
511
+
512
+ Parameters:
513
+ config ([`PLBartConfig`]):
514
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
515
+ load the weights associated with the model, only the configuration. Check out the
516
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
517
+ """
518
+
519
+ PLBART_GENERATION_EXAMPLE = r"""
520
+ Mask-filling example:
521
+
522
+ ```python
523
+ >>> from transformers import AutoTokenizer, PLBartForConditionalGeneration
524
+
525
+ >>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")
526
+ >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
527
+
528
+ >>> # en_XX is the language symbol id <LID> for English
529
+ >>> TXT = "<s> Is 0 the <mask> Fibonacci number ? </s> en_XX"
530
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids
531
+
532
+ >>> logits = model(input_ids).logits
533
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
534
+ >>> probs = logits[0, masked_index].softmax(dim=0)
535
+ >>> values, predictions = probs.topk(5)
536
+
537
+ >>> tokenizer.decode(predictions).split()
538
+ ['first', 'same', 'highest', 'result', 'number']
539
+ ```
540
+ """
541
+
542
+ PLBART_INPUTS_DOCSTRING = r"""
543
+ Args:
544
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
545
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
546
+ it.
547
+
548
+ Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
549
+ See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
550
+
551
+ [What are input IDs?](../glossary#input-ids)
552
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
553
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
554
+
555
+ - 1 for tokens that are **not masked**,
556
+ - 0 for tokens that are **masked**.
557
+
558
+ [What are attention masks?](../glossary#attention-mask)
559
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
560
+ Indices of decoder input sequence tokens in the vocabulary.
561
+
562
+ Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
563
+ See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
564
+
565
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
566
+
567
+ PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
568
+ varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
569
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
570
+ `past_key_values`).
571
+
572
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
573
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
574
+ for denoising pre-training following the paper.
575
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
576
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
577
+ be used by default.
578
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
579
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
580
+
581
+ - 1 indicates the head is **not masked**,
582
+ - 0 indicates the head is **masked**.
583
+
584
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
585
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
586
+
587
+ - 1 indicates the head is **not masked**,
588
+ - 0 indicates the head is **masked**.
589
+
590
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
591
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
592
+ `[0, 1]`:
593
+
594
+ - 1 indicates the head is **not masked**,
595
+ - 0 indicates the head is **masked**.
596
+
597
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
598
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
599
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
600
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
601
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
602
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
603
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
604
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
606
+
607
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
608
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
609
+
610
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
611
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
612
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
613
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
614
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
615
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
616
+ than the model's internal embedding lookup matrix.
618
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
620
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
621
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
622
+ input (see `past_key_values`). This is useful if you want more control over how to convert
623
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
624
+
625
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
626
+ of `inputs_embeds`.
627
+ use_cache (`bool`, *optional*):
628
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
629
+ `past_key_values`).
630
+ output_attentions (`bool`, *optional*):
631
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
632
+ tensors for more detail.
633
+ output_hidden_states (`bool`, *optional*):
634
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
635
+ more detail.
636
+ return_dict (`bool`, *optional*):
637
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
638
+ """
639
+
640
+
641
+ # Copied from transformers.models.bart.modeling_bart.BartEncoder with Bart->PLBart
642
+ class PLBartEncoder(PLBartPreTrainedModel):
643
+ """
644
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
645
+ [`PLBartEncoderLayer`].
646
+
647
+ Args:
648
+ config: PLBartConfig
649
+ embed_tokens (nn.Embedding): output embedding
650
+ """
651
+
652
+ def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding] = None):
653
+ super().__init__(config)
654
+
655
+ self.dropout = config.dropout
656
+ self.layerdrop = config.encoder_layerdrop
657
+
658
+ embed_dim = config.d_model
659
+ self.padding_idx = config.pad_token_id
660
+ self.max_source_positions = config.max_position_embeddings
661
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
662
+
663
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
664
+
665
+ if embed_tokens is not None:
666
+ self.embed_tokens.weight = embed_tokens.weight
667
+
668
+ self.embed_positions = PLBartLearnedPositionalEmbedding(
669
+ config.max_position_embeddings,
670
+ embed_dim,
671
+ )
672
+ self.layers = nn.ModuleList([PLBartEncoderLayer(config) for _ in range(config.encoder_layers)])
673
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
674
+ self._use_sdpa = config._attn_implementation == "sdpa"
675
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
676
+
677
+ self.gradient_checkpointing = False
678
+ # Initialize weights and apply final processing
679
+ self.post_init()
680
+
681
+ def get_input_embeddings(self):
682
+ return self.embed_tokens
683
+
684
+ def set_input_embeddings(self, value):
685
+ self.embed_tokens = value
686
+
687
+ def forward(
688
+ self,
689
+ input_ids: torch.LongTensor = None,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ head_mask: Optional[torch.Tensor] = None,
692
+ inputs_embeds: Optional[torch.FloatTensor] = None,
693
+ output_attentions: Optional[bool] = None,
694
+ output_hidden_states: Optional[bool] = None,
695
+ return_dict: Optional[bool] = None,
696
+ ) -> Union[Tuple, BaseModelOutput]:
697
+ r"""
698
+ Args:
699
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
700
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
701
+ provide it.
702
+
703
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
704
+ [`PreTrainedTokenizer.__call__`] for details.
705
+
706
+ [What are input IDs?](../glossary#input-ids)
707
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
708
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
709
+
710
+ - 1 for tokens that are **not masked**,
711
+ - 0 for tokens that are **masked**.
712
+
713
+ [What are attention masks?](../glossary#attention-mask)
714
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
715
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
716
+
717
+ - 1 indicates the head is **not masked**,
718
+ - 0 indicates the head is **masked**.
719
+
720
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
721
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
722
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
723
+ than the model's internal embedding lookup matrix.
724
+ output_attentions (`bool`, *optional*):
725
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
726
+ returned tensors for more detail.
727
+ output_hidden_states (`bool`, *optional*):
728
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
729
+ for more detail.
730
+ return_dict (`bool`, *optional*):
731
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
732
+ """
733
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
734
+ output_hidden_states = (
735
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
736
+ )
737
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
738
+
739
+ # retrieve input_ids and inputs_embeds
740
+ if input_ids is not None and inputs_embeds is not None:
741
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
742
+ elif input_ids is not None:
743
+ input = input_ids
744
+ input_ids = input_ids.view(-1, input_ids.shape[-1])
745
+ elif inputs_embeds is not None:
746
+ input = inputs_embeds[:, :, -1]
747
+ else:
748
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
749
+
750
+ if inputs_embeds is None:
751
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
752
+
753
+ embed_pos = self.embed_positions(input)
754
+ embed_pos = embed_pos.to(inputs_embeds.device)
755
+
756
+ hidden_states = inputs_embeds + embed_pos
757
+ hidden_states = self.layernorm_embedding(hidden_states)
758
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
759
+
760
+ # expand attention_mask
761
+ if attention_mask is not None:
762
+ if self._use_flash_attention_2:
763
+ attention_mask = attention_mask if 0 in attention_mask else None
764
+ elif self._use_sdpa and head_mask is None and not output_attentions:
765
+ # output_attentions=True & head_mask can not be supported when using SDPA, fall back to
766
+ # the manual implementation that requires a 4D causal mask in all cases.
767
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
768
+ attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
769
+ else:
770
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
771
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
772
+
773
+ encoder_states = () if output_hidden_states else None
774
+ all_attentions = () if output_attentions else None
775
+
776
+ # check if head_mask has a correct number of layers specified if desired
777
+ if head_mask is not None:
778
+ if head_mask.size()[0] != (len(self.layers)):
779
+ raise ValueError(
780
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
781
+ f" {head_mask.size()[0]}."
782
+ )
783
+
784
+ for idx, encoder_layer in enumerate(self.layers):
785
+ if output_hidden_states:
786
+ encoder_states = encoder_states + (hidden_states,)
787
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
788
+ to_drop = False
789
+ if self.training:
790
+ dropout_probability = torch.rand([])
791
+ if dropout_probability < self.layerdrop: # skip the layer
792
+ to_drop = True
793
+
794
+ if to_drop:
795
+ layer_outputs = (None, None)
796
+ else:
797
+ if self.gradient_checkpointing and self.training:
798
+ layer_outputs = self._gradient_checkpointing_func(
799
+ encoder_layer.__call__,
800
+ hidden_states,
801
+ attention_mask,
802
+ (head_mask[idx] if head_mask is not None else None),
803
+ output_attentions,
804
+ )
805
+ else:
806
+ layer_outputs = encoder_layer(
807
+ hidden_states,
808
+ attention_mask,
809
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
810
+ output_attentions=output_attentions,
811
+ )
812
+
813
+ hidden_states = layer_outputs[0]
814
+
815
+ if output_attentions:
816
+ all_attentions = all_attentions + (layer_outputs[1],)
817
+
818
+ if output_hidden_states:
819
+ encoder_states = encoder_states + (hidden_states,)
820
+
821
+ if not return_dict:
822
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
823
+ return BaseModelOutput(
824
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
825
+ )
826
+
827
+
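+ # Forward-pass sketch of the encoder (comment only): token embeddings (optionally scaled by
+ # sqrt(d_model) when config.scale_embedding) are summed with learned position embeddings, passed
+ # through layernorm_embedding and dropout, and then through config.encoder_layers encoder layers,
+ # where LayerDrop may skip whole layers with probability config.encoder_layerdrop during training.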
828
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder with Bart->PLBart
829
+ class PLBartDecoder(PLBartPreTrainedModel):
830
+ """
831
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PLBartDecoderLayer`]
832
+
833
+ Args:
834
+ config: PLBartConfig
835
+ embed_tokens (nn.Embedding): output embedding
836
+ """
837
+
838
+ def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding] = None):
839
+ super().__init__(config)
840
+ self.dropout = config.dropout
841
+ self.layerdrop = config.decoder_layerdrop
842
+ self.padding_idx = config.pad_token_id
843
+ self.max_target_positions = config.max_position_embeddings
844
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
845
+
846
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
847
+
848
+ if embed_tokens is not None:
849
+ self.embed_tokens.weight = embed_tokens.weight
850
+
851
+ self.embed_positions = PLBartLearnedPositionalEmbedding(
852
+ config.max_position_embeddings,
853
+ config.d_model,
854
+ )
855
+ self.layers = nn.ModuleList([PLBartDecoderLayer(config) for _ in range(config.decoder_layers)])
856
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
857
+ self._use_sdpa = config._attn_implementation == "sdpa"
858
+
859
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
860
+
861
+ self.gradient_checkpointing = False
862
+ # Initialize weights and apply final processing
863
+ self.post_init()
864
+
865
+ def get_input_embeddings(self):
866
+ return self.embed_tokens
867
+
868
+ def set_input_embeddings(self, value):
869
+ self.embed_tokens = value
870
+
871
+ def forward(
872
+ self,
873
+ input_ids: torch.LongTensor = None,
874
+ attention_mask: Optional[torch.Tensor] = None,
875
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
876
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
877
+ head_mask: Optional[torch.Tensor] = None,
878
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
879
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
880
+ inputs_embeds: Optional[torch.FloatTensor] = None,
881
+ use_cache: Optional[bool] = None,
882
+ output_attentions: Optional[bool] = None,
883
+ output_hidden_states: Optional[bool] = None,
884
+ return_dict: Optional[bool] = None,
885
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
886
+ r"""
887
+ Args:
888
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
889
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
890
+ provide it.
891
+
892
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
893
+ [`PreTrainedTokenizer.__call__`] for details.
894
+
895
+ [What are input IDs?](../glossary#input-ids)
896
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
897
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
898
+
899
+ - 1 for tokens that are **not masked**,
900
+ - 0 for tokens that are **masked**.
901
+
902
+ [What are attention masks?](../glossary#attention-mask)
903
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
904
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
905
+ of the decoder.
906
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
907
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
908
+ selected in `[0, 1]`:
909
+
910
+ - 1 for tokens that are **not masked**,
911
+ - 0 for tokens that are **masked**.
912
+
913
+ [What are attention masks?](../glossary#attention-mask)
914
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
915
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
916
+
917
+ - 1 indicates the head is **not masked**,
918
+ - 0 indicates the head is **masked**.
919
+
920
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
921
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
922
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
923
+
924
+ - 1 indicates the head is **not masked**,
925
+ - 0 indicates the head is **masked**.
926
+
927
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
928
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
929
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
930
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
931
+
932
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
933
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
934
+
935
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
936
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
937
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
938
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
939
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
940
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
941
+ than the model's internal embedding lookup matrix.
942
+ output_attentions (`bool`, *optional*):
943
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
944
+ returned tensors for more detail.
945
+ output_hidden_states (`bool`, *optional*):
946
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
947
+ for more detail.
948
+ return_dict (`bool`, *optional*):
949
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
950
+ """
951
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
952
+ output_hidden_states = (
953
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
954
+ )
955
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
956
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
957
+
958
+ # retrieve input_ids and inputs_embeds
959
+ if input_ids is not None and inputs_embeds is not None:
960
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
961
+ elif input_ids is not None:
962
+ input = input_ids
963
+ input_shape = input.shape
964
+ input_ids = input_ids.view(-1, input_shape[-1])
965
+ elif inputs_embeds is not None:
966
+ input_shape = inputs_embeds.size()[:-1]
967
+ input = inputs_embeds[:, :, -1]
968
+ else:
969
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
970
+
971
+ # past_key_values_length
972
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
973
+
974
+ if inputs_embeds is None:
975
+ inputs_embeds = self.embed_tokens(input) * self.embed_scale
976
+
977
+ if self._use_flash_attention_2:
978
+ # 2d mask is passed through the layers
979
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
980
+ elif self._use_sdpa and not output_attentions and cross_attn_head_mask is None:
981
+ # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
982
+ # the manual implementation that requires a 4D causal mask in all cases.
983
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
984
+ attention_mask,
985
+ input_shape,
986
+ inputs_embeds,
987
+ past_key_values_length,
988
+ )
989
+ else:
990
+ # 4d mask is passed through the layers
991
+ attention_mask = _prepare_4d_causal_attention_mask(
992
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
993
+ )
994
+
995
+ # expand encoder attention mask
996
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
997
+ if self._use_flash_attention_2:
998
+ encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
999
+ elif self._use_sdpa and cross_attn_head_mask is None and not output_attentions:
1000
+ # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
1001
+ # the manual implementation that requires a 4D causal mask in all cases.
1002
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1003
+ encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
1004
+ encoder_attention_mask,
1005
+ inputs_embeds.dtype,
1006
+ tgt_len=input_shape[-1],
1007
+ )
1008
+ else:
1009
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1010
+ encoder_attention_mask = _prepare_4d_attention_mask(
1011
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
1012
+ )
1013
+
1014
+ # embed positions
1015
+ positions = self.embed_positions(input, past_key_values_length)
1016
+ positions = positions.to(inputs_embeds.device)
1017
+
1018
+ hidden_states = inputs_embeds + positions
1019
+ hidden_states = self.layernorm_embedding(hidden_states)
1020
+
1021
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1022
+
1023
+ if self.gradient_checkpointing and self.training:
1024
+ if use_cache:
1025
+ logger.warning_once(
1026
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1027
+ )
1028
+ use_cache = False
1029
+
1030
+ # decoder layers
1031
+ all_hidden_states = () if output_hidden_states else None
1032
+ all_self_attns = () if output_attentions else None
1033
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
1034
+ next_decoder_cache = () if use_cache else None
1035
+
1036
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1037
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1038
+ if attn_mask is not None:
1039
+ if attn_mask.size()[0] != (len(self.layers)):
1040
+ raise ValueError(
1041
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1042
+ f" {head_mask.size()[0]}."
1043
+ )
1044
+
1045
+ for idx, decoder_layer in enumerate(self.layers):
1046
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1047
+ if output_hidden_states:
1048
+ all_hidden_states += (hidden_states,)
1049
+ if self.training:
1050
+ dropout_probability = torch.rand([])
1051
+ if dropout_probability < self.layerdrop:
1052
+ continue
1053
+
1054
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1055
+
1056
+ if self.gradient_checkpointing and self.training:
1057
+ layer_outputs = self._gradient_checkpointing_func(
1058
+ decoder_layer.__call__,
1059
+ hidden_states,
1060
+ attention_mask,
1061
+ encoder_hidden_states,
1062
+ encoder_attention_mask,
1063
+ head_mask[idx] if head_mask is not None else None,
1064
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1065
+ None,
1066
+ output_attentions,
1067
+ use_cache,
1068
+ )
1069
+ else:
1070
+ layer_outputs = decoder_layer(
1071
+ hidden_states,
1072
+ attention_mask=attention_mask,
1073
+ encoder_hidden_states=encoder_hidden_states,
1074
+ encoder_attention_mask=encoder_attention_mask,
1075
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1076
+ cross_attn_layer_head_mask=(
1077
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1078
+ ),
1079
+ past_key_value=past_key_value,
1080
+ output_attentions=output_attentions,
1081
+ use_cache=use_cache,
1082
+ )
1083
+ hidden_states = layer_outputs[0]
1084
+
1085
+ if use_cache:
1086
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1087
+
1088
+ if output_attentions:
1089
+ all_self_attns += (layer_outputs[1],)
1090
+
1091
+ if encoder_hidden_states is not None:
1092
+ all_cross_attentions += (layer_outputs[2],)
1093
+
1094
+ # add hidden states from the last decoder layer
1095
+ if output_hidden_states:
1096
+ all_hidden_states += (hidden_states,)
1097
+
1098
+ next_cache = next_decoder_cache if use_cache else None
1099
+ if not return_dict:
1100
+ return tuple(
1101
+ v
1102
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1103
+ if v is not None
1104
+ )
1105
+ return BaseModelOutputWithPastAndCrossAttentions(
1106
+ last_hidden_state=hidden_states,
1107
+ past_key_values=next_cache,
1108
+ hidden_states=all_hidden_states,
1109
+ attentions=all_self_attns,
1110
+ cross_attentions=all_cross_attentions,
1111
+ )
1112
+
1113
+
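+ # Cache layout note (comment only): `past_key_values` is one tuple per decoder layer, each holding
+ # 4 tensors -- self-attention key/value of shape (batch, num_heads, past_len, head_dim) followed by
+ # cross-attention key/value over the encoder sequence. The past length used for the causal mask and
+ # the position offset is read from past_key_values[0][0].shape[2].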
1114
+ @add_start_docstrings(
1115
+ "The bare PLBART Model outputting raw hidden-states without any specific head on top.",
1116
+ PLBART_START_DOCSTRING,
1117
+ )
1118
+ class PLBartModel(PLBartPreTrainedModel):
1119
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1120
+
1121
+ def __init__(self, config: PLBartConfig):
1122
+ super().__init__(config)
1123
+
1124
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1125
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1126
+
1127
+ self.encoder = PLBartEncoder(config, self.shared)
1128
+ self.decoder = PLBartDecoder(config, self.shared)
1129
+
1130
+ self.init_weights()
1131
+
1132
+ def get_input_embeddings(self):
1133
+ return self.shared
1134
+
1135
+ def set_input_embeddings(self, value):
1136
+ self.shared = value
1137
+ self.encoder.embed_tokens = self.shared
1138
+ self.decoder.embed_tokens = self.shared
1139
+
1140
+ def _tie_weights(self):
1141
+ if self.config.tie_word_embeddings:
1142
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
1143
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
1144
+
1145
+ def get_encoder(self):
1146
+ return self.encoder
1147
+
1148
+ def get_decoder(self):
1149
+ return self.decoder
1150
+
1151
+ @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)
1152
+ @add_code_sample_docstrings(
1153
+ checkpoint=_CHECKPOINT_FOR_DOC,
1154
+ output_type=Seq2SeqModelOutput,
1155
+ config_class=_CONFIG_FOR_DOC,
1156
+ )
1157
+ def forward(
1158
+ self,
1159
+ input_ids: Optional[torch.LongTensor] = None,
1160
+ attention_mask: Optional[torch.LongTensor] = None,
1161
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1162
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1163
+ head_mask: Optional[torch.Tensor] = None,
1164
+ decoder_head_mask: Optional[torch.LongTensor] = None,
1165
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1166
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1167
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1168
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1169
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1170
+ use_cache: Optional[bool] = None,
1171
+ output_attentions: Optional[bool] = None,
1172
+ output_hidden_states: Optional[bool] = None,
1173
+ return_dict: Optional[bool] = None,
1174
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
1175
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1176
+ output_hidden_states = (
1177
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1178
+ )
1179
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1180
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1181
+
1182
+ # different to other models, PLBart automatically creates decoder_input_ids from
1183
+ # input_ids if no decoder_input_ids are provided
1184
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1185
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
1186
+
1187
+ if encoder_outputs is None:
1188
+ encoder_outputs = self.encoder(
1189
+ input_ids=input_ids,
1190
+ attention_mask=attention_mask,
1191
+ head_mask=head_mask,
1192
+ inputs_embeds=inputs_embeds,
1193
+ output_attentions=output_attentions,
1194
+ output_hidden_states=output_hidden_states,
1195
+ return_dict=return_dict,
1196
+ )
1197
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1198
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1199
+ encoder_outputs = BaseModelOutput(
1200
+ last_hidden_state=encoder_outputs[0],
1201
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1202
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1203
+ )
1204
+
1205
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1206
+ decoder_outputs = self.decoder(
1207
+ input_ids=decoder_input_ids,
1208
+ attention_mask=decoder_attention_mask,
1209
+ encoder_hidden_states=encoder_outputs[0],
1210
+ encoder_attention_mask=attention_mask,
1211
+ head_mask=decoder_head_mask,
1212
+ cross_attn_head_mask=cross_attn_head_mask,
1213
+ past_key_values=past_key_values,
1214
+ inputs_embeds=decoder_inputs_embeds,
1215
+ use_cache=use_cache,
1216
+ output_attentions=output_attentions,
1217
+ output_hidden_states=output_hidden_states,
1218
+ return_dict=return_dict,
1219
+ )
1220
+
1221
+ if not return_dict:
1222
+ return decoder_outputs + encoder_outputs
1223
+
1224
+ return Seq2SeqModelOutput(
1225
+ last_hidden_state=decoder_outputs.last_hidden_state,
1226
+ past_key_values=decoder_outputs.past_key_values,
1227
+ decoder_hidden_states=decoder_outputs.hidden_states,
1228
+ decoder_attentions=decoder_outputs.attentions,
1229
+ cross_attentions=decoder_outputs.cross_attentions,
1230
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1231
+ encoder_hidden_states=encoder_outputs.hidden_states,
1232
+ encoder_attentions=encoder_outputs.attentions,
1233
+ )
1234
+
1235
+
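For readers skimming the diff, a minimal usage sketch of `PLBartModel.forward` as added above (illustrative only, not part of the committed file; assumes the public `uclanlp/plbart-base` checkpoint is reachable):

```python
import torch
from transformers import AutoTokenizer, PLBartModel

tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
model = PLBartModel.from_pretrained("uclanlp/plbart-base")

inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")
# No decoder_input_ids are passed: forward() derives them from input_ids via
# shift_tokens_right, which is the PLBart-specific behaviour noted in the code.
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, config.d_model)
```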
1236
+ @add_start_docstrings(
1237
+ "The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.",
1238
+ PLBART_START_DOCSTRING,
1239
+ )
1240
+ class PLBartForConditionalGeneration(PLBartPreTrainedModel):
1241
+ base_model_prefix = "model"
1242
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
1243
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
1244
+
1245
+ def __init__(self, config: PLBartConfig):
1246
+ super().__init__(config)
1247
+ self.model = PLBartModel(config)
1248
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
1249
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1250
+
1251
+ self.init_weights()
1252
+
1253
+ def get_encoder(self):
1254
+ return self.model.get_encoder()
1255
+
1256
+ def get_decoder(self):
1257
+ return self.model.get_decoder()
1258
+
1259
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1260
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1261
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1262
+ return new_embeddings
1263
+
1264
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1265
+ old_num_tokens = self.final_logits_bias.shape[-1]
1266
+ if new_num_tokens <= old_num_tokens:
1267
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1268
+ else:
1269
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1270
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1271
+ self.register_buffer("final_logits_bias", new_bias)
1272
+
1273
+ def get_output_embeddings(self):
1274
+ return self.lm_head
1275
+
1276
+ def set_output_embeddings(self, new_embeddings):
1277
+ self.lm_head = new_embeddings
1278
+
1279
+ @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)
1280
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1281
+ @add_end_docstrings(PLBART_GENERATION_EXAMPLE)
1282
+ def forward(
1283
+ self,
1284
+ input_ids: Optional[torch.LongTensor] = None,
1285
+ attention_mask: Optional[torch.LongTensor] = None,
1286
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1287
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1288
+ head_mask: Optional[torch.Tensor] = None,
1289
+ decoder_head_mask: Optional[torch.LongTensor] = None,
1290
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1291
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1292
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1293
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1294
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1295
+ labels: Optional[torch.Tensor] = None,
1296
+ use_cache: Optional[bool] = None,
1297
+ output_attentions: Optional[bool] = None,
1298
+ output_hidden_states: Optional[bool] = None,
1299
+ return_dict: Optional[bool] = None,
1300
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
1301
+ r"""
1302
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1303
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1304
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1305
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1306
+
1307
+ Returns:
1308
+
1309
+ """
1310
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1311
+
1312
+ if labels is not None:
1313
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1314
+ decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
1315
+
1316
+ outputs = self.model(
1317
+ input_ids,
1318
+ attention_mask=attention_mask,
1319
+ decoder_input_ids=decoder_input_ids,
1320
+ encoder_outputs=encoder_outputs,
1321
+ decoder_attention_mask=decoder_attention_mask,
1322
+ head_mask=head_mask,
1323
+ decoder_head_mask=decoder_head_mask,
1324
+ cross_attn_head_mask=cross_attn_head_mask,
1325
+ past_key_values=past_key_values,
1326
+ inputs_embeds=inputs_embeds,
1327
+ decoder_inputs_embeds=decoder_inputs_embeds,
1328
+ use_cache=use_cache,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ return_dict=return_dict,
1332
+ )
1333
+ lm_logits = self.lm_head(outputs[0])
1334
+ lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device)
1335
+
1336
+ masked_lm_loss = None
1337
+ if labels is not None:
1338
+ loss_fct = CrossEntropyLoss()
1339
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1340
+
1341
+ if not return_dict:
1342
+ output = (lm_logits,) + outputs[1:]
1343
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1344
+
1345
+ return Seq2SeqLMOutput(
1346
+ loss=masked_lm_loss,
1347
+ logits=lm_logits,
1348
+ past_key_values=outputs.past_key_values,
1349
+ decoder_hidden_states=outputs.decoder_hidden_states,
1350
+ decoder_attentions=outputs.decoder_attentions,
1351
+ cross_attentions=outputs.cross_attentions,
1352
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1353
+ encoder_hidden_states=outputs.encoder_hidden_states,
1354
+ encoder_attentions=outputs.encoder_attentions,
1355
+ )
1356
+
1357
+ def prepare_inputs_for_generation(
1358
+ self,
1359
+ decoder_input_ids: torch.LongTensor,
1360
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1361
+ attention_mask: Optional[torch.LongTensor] = None,
1362
+ head_mask: Optional[torch.Tensor] = None,
1363
+ decoder_head_mask: Optional[torch.Tensor] = None,
1364
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1365
+ use_cache: Optional[bool] = None,
1366
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1367
+ **kwargs,  # TODO: check whether this argument is actually needed; it appears to be unused.
1368
+ ) -> Dict[str, Any]:
1369
+ # cut decoder_input_ids if past is used
1370
+ if past_key_values is not None:
1371
+ past_length = past_key_values[0][0].shape[2]
1372
+
1373
+ # Some generation methods already pass only the last input ID
1374
+ if decoder_input_ids.shape[1] > past_length:
1375
+ remove_prefix_length = past_length
1376
+ else:
1377
+ # Default to old behavior: keep only final ID
1378
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1379
+
1380
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1381
+
1382
+ return {
1383
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1384
+ "encoder_outputs": encoder_outputs,
1385
+ "past_key_values": past_key_values,
1386
+ "decoder_input_ids": decoder_input_ids,
1387
+ "attention_mask": attention_mask,
1388
+ "head_mask": head_mask,
1389
+ "decoder_head_mask": decoder_head_mask,
1390
+ "cross_attn_head_mask": cross_attn_head_mask,
1391
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1392
+ }
1393
+
1394
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1395
+ return shift_tokens_right(labels, self.config.pad_token_id)
1396
+
1397
+ @staticmethod
1398
+ def _reorder_cache(past_key_values, beam_idx):
1399
+ reordered_past = ()
1400
+ for layer_past in past_key_values:
1401
+ # cached cross_attention states don't have to be reordered -> they are always the same
1402
+ reordered_past += (
1403
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1404
+ + layer_past[2:],
1405
+ )
1406
+ return reordered_past
1407
+
1408
+
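A hedged training sketch for `PLBartForConditionalGeneration`: passing `labels` alone is enough, since `forward` shifts them into `decoder_input_ids` and adds `final_logits_bias` to the logits before computing the loss (illustrative only; assumes `uclanlp/plbart-base`):

```python
from transformers import AutoTokenizer, PLBartForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")

batch = tokenizer(
    "def add(a, b): return a + b",
    text_target="Add two numbers.",
    return_tensors="pt",
)
# labels are shifted into decoder_input_ids inside forward() via shift_tokens_right
outputs = model(**batch)
print(float(outputs.loss))
```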
1409
+ @add_start_docstrings(
1410
+ """
1411
+ PLBart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for code
1412
+ classification.
1413
+ """,
1414
+ PLBART_START_DOCSTRING,
1415
+ )
1416
+ class PLBartForSequenceClassification(PLBartPreTrainedModel):
1417
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1418
+
1419
+ def __init__(self, config: PLBartConfig, **kwargs):
1420
+ super().__init__(config, **kwargs)
1421
+ self.model = PLBartModel(config)
1422
+ self.classification_head = PLBartClassificationHead(
1423
+ config.d_model,
1424
+ config.d_model,
1425
+ config.num_labels,
1426
+ config.classifier_dropout,
1427
+ )
1428
+
1429
+ # Initialize weights and apply final processing
1430
+ self.post_init()
1431
+
1432
+ @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)
1433
+ @add_code_sample_docstrings(
1434
+ checkpoint=_CHECKPOINT_FOR_DOC,
1435
+ output_type=Seq2SeqSequenceClassifierOutput,
1436
+ config_class=_CONFIG_FOR_DOC,
1437
+ )
1438
+ # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward
1439
+ def forward(
1440
+ self,
1441
+ input_ids: torch.LongTensor = None,
1442
+ attention_mask: Optional[torch.Tensor] = None,
1443
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1444
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1445
+ head_mask: Optional[torch.Tensor] = None,
1446
+ decoder_head_mask: Optional[torch.Tensor] = None,
1447
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1448
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1449
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1450
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1451
+ labels: Optional[torch.LongTensor] = None,
1452
+ use_cache: Optional[bool] = None,
1453
+ output_attentions: Optional[bool] = None,
1454
+ output_hidden_states: Optional[bool] = None,
1455
+ return_dict: Optional[bool] = None,
1456
+ ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
1457
+ r"""
1458
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1459
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1460
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1461
+ """
1462
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1463
+ if labels is not None:
1464
+ use_cache = False
1465
+
1466
+ if input_ids is None and inputs_embeds is not None:
1467
+ raise NotImplementedError(
1468
+ f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
1469
+ )
1470
+
1471
+ outputs = self.model(
1472
+ input_ids,
1473
+ attention_mask=attention_mask,
1474
+ decoder_input_ids=decoder_input_ids,
1475
+ decoder_attention_mask=decoder_attention_mask,
1476
+ head_mask=head_mask,
1477
+ decoder_head_mask=decoder_head_mask,
1478
+ cross_attn_head_mask=cross_attn_head_mask,
1479
+ encoder_outputs=encoder_outputs,
1480
+ inputs_embeds=inputs_embeds,
1481
+ decoder_inputs_embeds=decoder_inputs_embeds,
1482
+ use_cache=use_cache,
1483
+ output_attentions=output_attentions,
1484
+ output_hidden_states=output_hidden_states,
1485
+ return_dict=return_dict,
1486
+ )
1487
+ hidden_states = outputs[0] # last hidden state
1488
+
1489
+ eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
1490
+
1491
+ if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
1492
+ raise ValueError("All examples must have the same number of <eos> tokens.")
1493
+ sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
1494
+ :, -1, :
1495
+ ]
1496
+ logits = self.classification_head(sentence_representation)
1497
+
1498
+ loss = None
1499
+ if labels is not None:
1500
+ labels = labels.to(logits.device)
1501
+ if self.config.problem_type is None:
1502
+ if self.config.num_labels == 1:
1503
+ self.config.problem_type = "regression"
1504
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1505
+ self.config.problem_type = "single_label_classification"
1506
+ else:
1507
+ self.config.problem_type = "multi_label_classification"
1508
+
1509
+ if self.config.problem_type == "regression":
1510
+ loss_fct = MSELoss()
1511
+ if self.config.num_labels == 1:
1512
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1513
+ else:
1514
+ loss = loss_fct(logits, labels)
1515
+ elif self.config.problem_type == "single_label_classification":
1516
+ loss_fct = CrossEntropyLoss()
1517
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1518
+ elif self.config.problem_type == "multi_label_classification":
1519
+ loss_fct = BCEWithLogitsLoss()
1520
+ loss = loss_fct(logits, labels)
1521
+ if not return_dict:
1522
+ output = (logits,) + outputs[1:]
1523
+ return ((loss,) + output) if loss is not None else output
1524
+
1525
+ return Seq2SeqSequenceClassifierOutput(
1526
+ loss=loss,
1527
+ logits=logits,
1528
+ past_key_values=outputs.past_key_values,
1529
+ decoder_hidden_states=outputs.decoder_hidden_states,
1530
+ decoder_attentions=outputs.decoder_attentions,
1531
+ cross_attentions=outputs.cross_attentions,
1532
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1533
+ encoder_hidden_states=outputs.encoder_hidden_states,
1534
+ encoder_attentions=outputs.encoder_attentions,
1535
+ )
1536
+
1537
+
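The eos-pooling step in `PLBartForSequenceClassification.forward` is easy to misread, so here is a toy reproduction with dummy tensors (no checkpoint required) showing which hidden state becomes the sentence representation:

```python
import torch

batch_size, seq_len, d_model, eos_token_id = 2, 6, 4, 2
hidden_states = torch.randn(batch_size, seq_len, d_model)
input_ids = torch.tensor(
    [[5, 7, 2, 1, 1, 1],   # <eos> (id 2) at position 2, then padding
     [9, 8, 6, 4, 2, 1]]   # <eos> at position 4
)
eos_mask = input_ids.eq(eos_token_id)
# Every example must contain the same number of <eos> tokens; the representation
# is the hidden state at the *last* <eos> position of each sequence.
sentence_representation = hidden_states[eos_mask, :].view(batch_size, -1, d_model)[:, -1, :]
print(sentence_representation.shape)  # torch.Size([2, 4])
```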
1538
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PLBart
1539
+ class PLBartDecoderWrapper(PLBartPreTrainedModel):
1540
+ """
1541
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1542
+ used in combination with the [`EncoderDecoderModel`] framework.
1543
+ """
1544
+
1545
+ def __init__(self, config):
1546
+ super().__init__(config)
1547
+ self.decoder = PLBartDecoder(config)
1548
+
1549
+ def forward(self, *args, **kwargs):
1550
+ return self.decoder(*args, **kwargs)
1551
+
1552
+
1553
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->PLBart, facebook/bart-base->uclanlp/plbart-base
1554
+ class PLBartForCausalLM(PLBartPreTrainedModel):
1555
+ _tied_weights_keys = ["lm_head.weight"]
1556
+
1557
+ def __init__(self, config):
1558
+ config = copy.deepcopy(config)
1559
+ config.is_decoder = True
1560
+ config.is_encoder_decoder = False
1561
+ super().__init__(config)
1562
+ self.model = PLBartDecoderWrapper(config)
1563
+
1564
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1565
+
1566
+ # Initialize weights and apply final processing
1567
+ self.post_init()
1568
+
1569
+ def get_input_embeddings(self):
1570
+ return self.model.decoder.embed_tokens
1571
+
1572
+ def set_input_embeddings(self, value):
1573
+ self.model.decoder.embed_tokens = value
1574
+
1575
+ def get_output_embeddings(self):
1576
+ return self.lm_head
1577
+
1578
+ def set_output_embeddings(self, new_embeddings):
1579
+ self.lm_head = new_embeddings
1580
+
1581
+ def set_decoder(self, decoder):
1582
+ self.model.decoder = decoder
1583
+
1584
+ def get_decoder(self):
1585
+ return self.model.decoder
1586
+
1587
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1588
+ def forward(
1589
+ self,
1590
+ input_ids: torch.LongTensor = None,
1591
+ attention_mask: Optional[torch.Tensor] = None,
1592
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1593
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1594
+ head_mask: Optional[torch.Tensor] = None,
1595
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1596
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1597
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1598
+ labels: Optional[torch.LongTensor] = None,
1599
+ use_cache: Optional[bool] = None,
1600
+ output_attentions: Optional[bool] = None,
1601
+ output_hidden_states: Optional[bool] = None,
1602
+ return_dict: Optional[bool] = None,
1603
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1604
+ r"""
1605
+ Args:
1606
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1607
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1608
+ provide it.
1609
+
1610
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1611
+ [`PreTrainedTokenizer.__call__`] for details.
1612
+
1613
+ [What are input IDs?](../glossary#input-ids)
1614
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1615
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1616
+
1617
+ - 1 for tokens that are **not masked**,
1618
+ - 0 for tokens that are **masked**.
1619
+
1620
+ [What are attention masks?](../glossary#attention-mask)
1621
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1622
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1623
+ if the model is configured as a decoder.
1624
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1625
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
1626
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
1627
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1628
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1629
+
1630
+ - 1 indicates the head is **not masked**,
1631
+ - 0 indicates the head is **masked**.
1632
+
1633
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1634
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1635
+
1636
+ - 1 indicates the head is **not masked**,
1637
+ - 0 indicates the head is **masked**.
1638
+
1639
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1640
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1641
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
1642
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
1643
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
1644
+
1645
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1646
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1647
+
1648
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1649
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1650
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1651
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1652
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1653
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1654
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1655
+ use_cache (`bool`, *optional*):
1656
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1657
+ (see `past_key_values`).
1658
1661
+ output_attentions (`bool`, *optional*):
1662
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1663
+ returned tensors for more detail.
1664
+ output_hidden_states (`bool`, *optional*):
1665
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1666
+ for more detail.
1667
+ return_dict (`bool`, *optional*):
1668
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1669
+
1670
+ Returns:
1671
+
1672
+ Example:
1673
+
1674
+ ```python
1675
+ >>> from transformers import AutoTokenizer, PLBartForCausalLM
1676
+
1677
+ >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
1678
+ >>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base", add_cross_attention=False)
1679
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
1680
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1681
+ >>> outputs = model(**inputs)
1682
+
1683
+ >>> logits = outputs.logits
1684
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
1685
+ >>> list(logits.shape) == expected_shape
1686
+ True
1687
+ ```"""
1688
+
1689
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1690
+ output_hidden_states = (
1691
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1692
+ )
1693
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1694
+
1695
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1696
+ outputs = self.model.decoder(
1697
+ input_ids=input_ids,
1698
+ attention_mask=attention_mask,
1699
+ encoder_hidden_states=encoder_hidden_states,
1700
+ encoder_attention_mask=encoder_attention_mask,
1701
+ head_mask=head_mask,
1702
+ cross_attn_head_mask=cross_attn_head_mask,
1703
+ past_key_values=past_key_values,
1704
+ inputs_embeds=inputs_embeds,
1705
+ use_cache=use_cache,
1706
+ output_attentions=output_attentions,
1707
+ output_hidden_states=output_hidden_states,
1708
+ return_dict=return_dict,
1709
+ )
1710
+
1711
+ logits = self.lm_head(outputs[0])
1712
+
1713
+ loss = None
1714
+ if labels is not None:
1715
+ labels = labels.to(logits.device)
1716
+ loss_fct = CrossEntropyLoss()
1717
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
1718
+
1719
+ if not return_dict:
1720
+ output = (logits,) + outputs[1:]
1721
+ return (loss,) + output if loss is not None else output
1722
+
1723
+ return CausalLMOutputWithCrossAttentions(
1724
+ loss=loss,
1725
+ logits=logits,
1726
+ past_key_values=outputs.past_key_values,
1727
+ hidden_states=outputs.hidden_states,
1728
+ attentions=outputs.attentions,
1729
+ cross_attentions=outputs.cross_attentions,
1730
+ )
1731
+
1732
+ def prepare_inputs_for_generation(
1733
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1734
+ ):
1735
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1736
+ if attention_mask is None:
1737
+ attention_mask = input_ids.new_ones(input_ids.shape)
1738
+
1739
+ if past_key_values:
1740
+ past_length = past_key_values[0][0].shape[2]
1741
+
1742
+ # Some generation methods already pass only the last input ID
1743
+ if input_ids.shape[1] > past_length:
1744
+ remove_prefix_length = past_length
1745
+ else:
1746
+ # Default to old behavior: keep only final ID
1747
+ remove_prefix_length = input_ids.shape[1] - 1
1748
+
1749
+ input_ids = input_ids[:, remove_prefix_length:]
1750
+ # first step, decoder_cached_states are empty
1751
+ return {
1752
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
1753
+ "attention_mask": attention_mask,
1754
+ "past_key_values": past_key_values,
1755
+ "use_cache": use_cache,
1756
+ }
1757
+
1758
+ @staticmethod
1759
+ def _reorder_cache(past_key_values, beam_idx):
1760
+ reordered_past = ()
1761
+ for layer_past in past_key_values:
1762
+ reordered_past += (
1763
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1764
+ )
1765
+ return reordered_past
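A standalone sketch of the prefix-trimming logic in `prepare_inputs_for_generation` above, using toy values instead of a real cache:

```python
import torch

input_ids = torch.tensor([[0, 11, 12, 13]])  # tokens decoded so far
past_length = 3                              # past_key_values[0][0].shape[2] in the real method

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length       # keep only tokens the cache has not seen yet
else:
    remove_prefix_length = input_ids.shape[1] - 1  # fallback: keep only the final token

print(input_ids[:, remove_prefix_length:])   # tensor([[13]]) -> only the new token is fed to the decoder
```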
llmeval-env/lib/python3.10/site-packages/transformers/models/plbart/tokenization_plbart.py ADDED
@@ -0,0 +1,425 @@
1
+ # coding=utf-8
2
+ # Copyright 2022, UCLA NLP, The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
31
+
32
+
33
+ FAIRSEQ_LANGUAGE_CODES = {
34
+ "base": ["__java__", "__python__", "__en_XX__"],
35
+ "multi": ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
36
+ }
37
+
38
+ FAIRSEQ_LANGUAGE_CODES_MAP = {
39
+ "java": "__java__",
40
+ "python": "__python__",
41
+ "en_XX": "__en_XX__",
42
+ "javascript": "__javascript__",
43
+ "php": "__php__",
44
+ "ruby": "__ruby__",
45
+ "go": "__go__",
46
+ }
47
+
48
+
49
+ class PLBartTokenizer(PreTrainedTokenizer):
50
+ """
51
+ Construct a PLBART tokenizer.
52
+
53
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
54
+ [SentencePiece](https://github.com/google/sentencepiece).
55
+
56
+ The tokenization method is `<tokens> <eos> <language code>` for both source and target language documents
57
+ (no prefix token is used; see `set_src_lang_special_tokens` and `set_tgt_lang_special_tokens` below).
58
+
59
+ Args:
60
+ vocab_file (`str`):
61
+ Path to the vocabulary file.
62
+ src_lang (`str`, *optional*):
63
+ A string representing the source language.
64
+ tgt_lang (`str`, *optional*):
65
+ A string representing the target language.
66
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
67
+ The start of sequence token.
68
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
69
+ The end of sequence token.
70
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
71
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
72
+ sequence classification or for a text and a question for question answering. It is also used as the last
73
+ token of a sequence built with special tokens.
74
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
75
+ The cls token, which is a special token used as the first token for all tasks.
76
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
77
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
78
+ token instead.
79
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
80
+ The token used for padding, for example when batching sequences of different lengths.
81
+ mask_token(`str`, *optional*, defaults to `"<mask>"`):
82
+ The token used for masking values. This is the token used when training this model with masking tasks. This
83
+ is only used in the `"base"` tokenizer type. For `"multi"` tokenizer, masking is never done for the
84
+ downstream tasks.
85
+ language_codes (`str`, *optional*, defaults to `"base"`):
86
+ What language codes to use. Should be one of `"base"` or `"multi"`.
87
+ sp_model_kwargs (`dict`, *optional*):
88
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
89
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
90
+ to set:
91
+ - `enable_sampling`: Enable subword regularization.
92
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
93
+ - `nbest_size = {0,1}`: No sampling is performed.
94
+ - `nbest_size > 1`: samples from the nbest_size results.
95
+ - `nbest_size < 0`: assumes an infinite nbest_size and samples from all hypotheses (lattice)
96
+ using the forward-filtering-and-backward-sampling algorithm.
97
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
98
+ BPE-dropout.
99
+
100
+ Examples:
101
+
102
+ ```python
103
+ >>> from transformers import PLBartTokenizer
104
+
105
+ >>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
106
+ >>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
107
+ >>> expected_translation_english = "Returns the maximum value of a b c."
108
+ >>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
109
+ ```"""
110
+
111
+ vocab_files_names = VOCAB_FILES_NAMES
112
+ model_input_names = ["input_ids", "attention_mask"]
113
+
114
+ prefix_tokens: List[int] = []
115
+ suffix_tokens: List[int] = []
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_file,
120
+ bos_token="<s>",
121
+ eos_token="</s>",
122
+ sep_token="</s>",
123
+ cls_token="<s>",
124
+ unk_token="<unk>",
125
+ pad_token="<pad>",
126
+ mask_token="<mask>",
127
+ language_codes="base",
128
+ tokenizer_file=None,
129
+ src_lang=None,
130
+ tgt_lang=None,
131
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
132
+ additional_special_tokens=None,
133
+ **kwargs,
134
+ ):
135
+ # Mask token behaves like a normal word, i.e. it includes the space before it
136
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
137
+
138
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
139
+ src_lang = self._convert_lang_code_special_format(src_lang)
140
+ tgt_lang = self._convert_lang_code_special_format(tgt_lang)
141
+
142
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
143
+ self.sp_model.Load(str(vocab_file))
144
+ self.vocab_file = vocab_file
145
+ self.language_codes = language_codes
146
+
147
+ fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]
148
+
149
+ # Original fairseq vocab and spm vocab must be "aligned":
150
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
151
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
152
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
153
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
154
+
155
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
156
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
157
+
158
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
159
+ self.fairseq_offset = 1
160
+
161
+ self.sp_model_size = len(self.sp_model)
162
+ self.lang_code_to_id = {
163
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)
164
+ }
165
+ self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
166
+
167
+ if self.language_codes == "base":
168
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
169
+
170
+ self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
171
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
172
+ _additional_special_tokens = list(self.lang_code_to_id.keys())
173
+
174
+ if additional_special_tokens is not None:
175
+ # Only add those special tokens if they are not already there.
176
+ _additional_special_tokens.extend(
177
+ [t for t in additional_special_tokens if t not in _additional_special_tokens]
178
+ )
179
+
180
+ if self.language_codes == "base":
181
+ self._src_lang = src_lang
182
+ self.cur_lang_code_id = (
183
+ self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang
184
+ )
185
+ else:
186
+ self._src_lang = src_lang if src_lang is not None else "__en_XX__"
187
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
188
+
189
+ super().__init__(
190
+ bos_token=bos_token,
191
+ eos_token=eos_token,
192
+ unk_token=unk_token,
193
+ sep_token=sep_token,
194
+ cls_token=cls_token,
195
+ pad_token=pad_token,
196
+ mask_token=mask_token,
197
+ language_codes=language_codes,
198
+ tokenizer_file=tokenizer_file,
199
+ src_lang=src_lang,
200
+ tgt_lang=tgt_lang,
201
+ additional_special_tokens=_additional_special_tokens,
202
+ sp_model_kwargs=self.sp_model_kwargs,
203
+ **kwargs,
204
+ )
205
+
206
+ self.tgt_lang = tgt_lang
207
+ self.set_src_lang_special_tokens(self._src_lang)
208
+
209
+ def __getstate__(self):
210
+ state = self.__dict__.copy()
211
+ state["sp_model"] = None
212
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
213
+ return state
214
+
215
+ def __setstate__(self, d):
216
+ self.__dict__ = d
217
+
218
+ # for backward compatibility
219
+ if not hasattr(self, "sp_model_kwargs"):
220
+ self.sp_model_kwargs = {}
221
+
222
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
223
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
224
+
225
+ @property
226
+ def vocab_size(self):
227
+ if self.language_codes == "base":
228
+ return (
229
+ len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1
230
+ ) # Plus 1 for the mask token
231
+ else:
232
+ return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
233
+
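The fairseq/spm alignment comment in `__init__` and the `vocab_size` property above encode a small piece of id arithmetic; this toy sketch (hypothetical sizes, not taken from a real checkpoint) spells it out:

```python
# Pretend the sentencepiece model has only 10 pieces.
sp_model_size = 10
fairseq_offset = 1                                     # spm piece id p maps to vocab id p + 1
lang_codes = ["__java__", "__python__", "__en_XX__"]   # the "base" code set

lang_code_to_id = {code: sp_model_size + i + fairseq_offset for i, code in enumerate(lang_codes)}
mask_id = sp_model_size + len(lang_code_to_id) + fairseq_offset  # <mask> comes right after the codes

print(lang_code_to_id)  # {'__java__': 11, '__python__': 12, '__en_XX__': 13}
print(mask_id)          # 14
print(sp_model_size + len(lang_codes) + fairseq_offset + 1)  # 15 == vocab_size for "base" (+1 for <mask>)
```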
234
+ @property
235
+ def src_lang(self) -> str:
236
+ return self._src_lang
237
+
238
+ @src_lang.setter
239
+ def src_lang(self, new_src_lang: str) -> None:
240
+ new_src_lang = self._convert_lang_code_special_format(new_src_lang)
241
+ self._src_lang = new_src_lang
242
+ self.set_src_lang_special_tokens(self._src_lang)
243
+
244
+ def get_special_tokens_mask(
245
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
246
+ ) -> List[int]:
247
+ """
248
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
249
+ special tokens using the tokenizer `prepare_for_model` method.
250
+
251
+ Args:
252
+ token_ids_0 (`List[int]`):
253
+ List of IDs.
254
+ token_ids_1 (`List[int]`, *optional*):
255
+ Optional second list of IDs for sequence pairs.
256
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
257
+ Whether or not the token list is already formatted with special tokens for the model.
258
+
259
+ Returns:
260
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
261
+ """
262
+
263
+ if already_has_special_tokens:
264
+ return super().get_special_tokens_mask(
265
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
266
+ )
267
+
268
+ prefix_ones = [1] * len(self.prefix_tokens)
269
+ suffix_ones = [1] * len(self.suffix_tokens)
270
+ if token_ids_1 is None:
271
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
272
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
273
+
274
+ def build_inputs_with_special_tokens(
275
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
276
+ ) -> List[int]:
277
+ """
278
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
279
+ adding special tokens. An PLBART sequence has the following format, where `X` represents the sequence:
280
+
281
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
282
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
283
+
284
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
285
+ separator.
286
+
287
+ Args:
288
+ token_ids_0 (`List[int]`):
289
+ List of IDs to which the special tokens will be added.
290
+ token_ids_1 (`List[int]`, *optional*):
291
+ Optional second list of IDs for sequence pairs.
292
+
293
+ Returns:
294
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
295
+ """
296
+ if token_ids_1 is None:
297
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
298
+ # We don't expect to process pairs, but leave the pair logic for API consistency
299
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
300
+
301
+ def create_token_type_ids_from_sequences(
302
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
303
+ ) -> List[int]:
304
+ """
305
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not
306
+ make use of token type ids, therefore a list of zeros is returned.
307
+
308
+ Args:
309
+ token_ids_0 (`List[int]`):
310
+ List of IDs.
311
+ token_ids_1 (`List[int]`, *optional*):
312
+ Optional second list of IDs for sequence pairs.
313
+
314
+ Returns:
315
+ `List[int]`: List of zeros.
316
+ """
317
+
318
+ sep = [self.sep_token_id]
319
+ cls = [self.cls_token_id]
320
+
321
+ if token_ids_1 is None:
322
+ return len(cls + token_ids_0 + sep) * [0]
323
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
324
+
325
+ def _build_translation_inputs(
326
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
327
+ ):
328
+ """Used by translation pipeline, to prepare inputs for the generate function"""
329
+ if src_lang is None or tgt_lang is None:
330
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
331
+ self.src_lang = self._convert_lang_code_special_format(src_lang)
332
+ self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
333
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
334
+ tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang)
335
+ inputs["forced_bos_token_id"] = tgt_lang_id
336
+ return inputs
337
+
338
+ def get_vocab(self):
339
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
340
+ vocab.update(self.added_tokens_encoder)
341
+ return vocab
342
+
343
+ def _tokenize(self, text: str) -> List[str]:
344
+ return self.sp_model.encode(text, out_type=str)
345
+
346
+ def _convert_token_to_id(self, token):
347
+ """Converts a token (str) in an id using the vocab."""
348
+ if token in self.fairseq_tokens_to_ids:
349
+ return self.fairseq_tokens_to_ids[token]
350
+ spm_id = self.sp_model.PieceToId(token)
351
+
352
+ # Need to return unknown token if the SP model returned 0
353
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
354
+
355
+ def _convert_id_to_token(self, index):
356
+ """Converts an index (integer) in a token (str) using the vocab."""
357
+ if index in self.fairseq_ids_to_tokens:
358
+ return self.fairseq_ids_to_tokens[index]
359
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
360
+
361
+ def convert_tokens_to_string(self, tokens):
362
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
363
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
364
+ return out_string
365
+
366
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
367
+ if not os.path.isdir(save_directory):
368
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
369
+ return
370
+ out_vocab_file = os.path.join(
371
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
372
+ )
373
+
374
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
375
+ copyfile(self.vocab_file, out_vocab_file)
376
+ elif not os.path.isfile(self.vocab_file):
377
+ with open(out_vocab_file, "wb") as fi:
378
+ content_spiece_model = self.sp_model.serialized_model_proto()
379
+ fi.write(content_spiece_model)
380
+
381
+ return (out_vocab_file,)
382
+
383
+ def prepare_seq2seq_batch(
384
+ self,
385
+ src_texts: List[str],
386
+ src_lang: str = "en_XX",
387
+ tgt_texts: Optional[List[str]] = None,
388
+ tgt_lang: str = "python",
389
+ **kwargs,
390
+ ) -> BatchEncoding:
391
+ self.src_lang = self._convert_lang_code_special_format(src_lang)
392
+ self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
393
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
394
+
395
+ def _switch_to_input_mode(self):
396
+ return self.set_src_lang_special_tokens(self.src_lang)
397
+
398
+ def _switch_to_target_mode(self):
399
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
400
+
401
+ def set_src_lang_special_tokens(self, src_lang) -> None:
402
+ """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
403
+ src_lang = self._convert_lang_code_special_format(src_lang)
404
+ self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None
405
+ self.prefix_tokens = []
406
+ if self.cur_lang_code is not None:
407
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
408
+ else:
409
+ self.suffix_tokens = [self.eos_token_id]
410
+
411
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
412
+ """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
413
+ lang = self._convert_lang_code_special_format(lang)
414
+
415
+ self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None
416
+ self.prefix_tokens = []
417
+ if self.cur_lang_code is not None:
418
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
419
+ else:
420
+ self.suffix_tokens = [self.eos_token_id]
421
+
422
+ def _convert_lang_code_special_format(self, lang: str) -> str:
423
+ """Convert Language Codes to format tokenizer uses if required"""
424
+ lang = FAIRSEQ_LANGUAGE_CODES_MAP[lang] if lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys() else lang
425
+ return lang
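A hedged sketch of the resulting special-token layout (assumes the `uclanlp/plbart-base` sentencepiece model can be downloaded; the printed values mirror the logic above rather than a verified run):

```python
from transformers import PLBartTokenizer

tok = PLBartTokenizer.from_pretrained("uclanlp/plbart-base", src_lang="python", tgt_lang="en_XX")

ids = tok("def add(a, b): return a + b").input_ids
# No prefix token; the sequence is suffixed with </s> followed by the source language code.
print(tok.convert_ids_to_tokens(ids[-2:]))  # ['</s>', '__python__']

# Short language names are normalized through FAIRSEQ_LANGUAGE_CODES_MAP.
print(tok._convert_lang_code_special_format("python"))  # '__python__'
```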
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_seamless_m4t_v2": ["SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4Tv2Config"],
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_seamless_m4t_v2"] = [
34
+ "SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "SeamlessM4Tv2ForTextToSpeech",
36
+ "SeamlessM4Tv2ForSpeechToSpeech",
37
+ "SeamlessM4Tv2ForTextToText",
38
+ "SeamlessM4Tv2ForSpeechToText",
39
+ "SeamlessM4Tv2Model",
40
+ "SeamlessM4Tv2PreTrainedModel",
41
+ ]
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_seamless_m4t_v2 import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4Tv2Config
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_seamless_m4t_v2 import (
53
+ SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ SeamlessM4Tv2ForSpeechToSpeech,
55
+ SeamlessM4Tv2ForSpeechToText,
56
+ SeamlessM4Tv2ForTextToSpeech,
57
+ SeamlessM4Tv2ForTextToText,
58
+ SeamlessM4Tv2Model,
59
+ SeamlessM4Tv2PreTrainedModel,
60
+ )
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
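A small illustration of what the lazy-module pattern above means for user code: submodules are imported only on first attribute access, so importing the package stays cheap until a class is actually needed (requires `torch` for the modeling classes; illustrative only):

```python
from transformers.models import seamless_m4t_v2

config_cls = seamless_m4t_v2.SeamlessM4Tv2Config  # loads configuration_seamless_m4t_v2 on demand
model_cls = seamless_m4t_v2.SeamlessM4Tv2Model    # only now is modeling_seamless_m4t_v2 imported
print(config_cls.__name__, model_cls.__name__)
```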
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc ADDED
Binary file (20.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc ADDED
Binary file (139 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py ADDED
@@ -0,0 +1,425 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SeamlessM4Tv2 model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class SeamlessM4Tv2Config(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. It is used to instantiate
30
+ a SeamlessM4Tv2 model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the SeamlessM4Tv2
32
+ [""](https://huggingface.co/"") architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 256102):
40
+ Vocabulary size of the text modality of the SeamlessM4Tv2 model. Defines the number of different tokens
41
+ that can be represented by the `inputs_ids` passed when calling [`~SeamlessM4Tv2Model`],
42
+ [`~SeamlessM4Tv2ForTextToSpeech`] or [`~SeamlessM4Tv2ForTextToText`].
43
+ t2u_vocab_size (`int`, *optional*, defaults to 10082):
44
+ Unit vocabulary size of the SeamlessM4Tv2 model. Defines the number of different "unit tokens" that can be
45
+ represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4Tv2Model`],
46
+ [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
47
+ char_vocab_size (`int`, *optional*, defaults to 10943):
48
+ Character vocabulary size of the SeamlessM4Tv2 model. Defines the number of different character tokens that
49
+ can be represented by the `char_inputs_ids` passed when calling the Text-To-Units sub-model of
50
+ [`~SeamlessM4Tv2Model`], [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
51
+
52
+ > Parameters shared across sub-models
53
+
54
+ hidden_size (`int`, *optional*, defaults to 1024):
55
+ Dimensionality of the "intermediate" layers in the architecture.
56
+ initializer_range (`float`, *optional*, defaults to 0.02):
57
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
58
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
59
+ The epsilon used by the layer normalization layers.
60
+ use_cache (`bool`, *optional*, defaults to `True`):
61
+ Whether or not the model should return the last key/values attentions (not used by all models).
62
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
63
+ The maximum sequence length that this model's text encoder and decoder might ever be used with. Typically set
64
+ this to something large just in case (e.g., 512 or 1024 or 2048).
65
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
66
+ Whether the model is used as an encoder/decoder or not.
67
+ encoder_layerdrop (`float`, *optional*, defaults to 0.05):
68
+ The LayerDrop probability for the encoders. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
69
+ for more details.
70
+ decoder_layerdrop (`float`, *optional*, defaults to 0.05):
71
+ The LayerDrop probability for the decoders. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
72
+ for more details.
73
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
74
+ The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
75
+ `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
76
+ dropout (`float`, *optional*, defaults to 0.1):
77
+ The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
78
+ attention_dropout (`float`, *optional*, defaults to 0.1):
79
+ The dropout probability for all attention layers.
80
+ activation_dropout (`float`, *optional*, defaults to 0.0):
81
+ The dropout probability for all activation layers in the model.
82
+ scale_embedding (`bool`, *optional*, defaults to `True`):
83
+ Scale embeddings by dividing by sqrt(d_model).
84
+
85
+ > Text encoder and text decoder specific parameters
86
+
87
+ encoder_layers (`int`, *optional*, defaults to 24):
88
+ Number of hidden layers in the Transformer text encoder.
89
+ encoder_ffn_dim (`int`, *optional*, defaults to 8192):
90
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
91
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
92
+ Number of attention heads for each attention layer in the Transformer text encoder.
93
+ decoder_layers (`int`, *optional*, defaults to 24):
94
+ Number of hidden layers in the Transformer text decoder.
95
+ decoder_ffn_dim (`int`, *optional*, defaults to 8192):
96
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
97
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
98
+ Number of attention heads for each attention layer in the Transformer text decoder.
99
+ decoder_start_token_id (`int`, *optional*, defaults to 3):
100
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
101
+ applied in the text decoder.
102
+ max_new_tokens (`int`, *optional*, defaults to 256):
103
+ The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt.
104
+ pad_token_id (`int`, *optional*, defaults to 0):
105
+ The id of the _padding_ text token. Only applied to the text-decoder model.
106
+ bos_token_id (`int`, *optional*, defaults to 2):
107
+ The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
108
+ eos_token_id (`int`, *optional*, defaults to 3):
109
+ The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
110
+
111
+ > Speech encoder specific parameters
112
+
113
+ speech_encoder_layers (`int`, *optional*, defaults to 24):
114
+ Number of hidden layers in the Transformer speech encoder.
115
+ speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
116
+ Number of attention heads for each attention layer in the Transformer speech encoder.
117
+ speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
118
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
119
+ speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
120
+ The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
121
+ `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
122
+ speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
123
+ The dropout probability for all layers in the speech encoder.
124
+ add_adapter (`bool`, *optional*, defaults to `True`):
125
+ Add an adapter layer on top of the speech encoder.
126
+ speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
127
+ The LayerDrop probability for the speech encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
128
+ for more details.
129
+ feature_projection_input_dim (`int`, *optional*, defaults to 160):
130
+ Input dimension of the input feature projection of the speech encoder, i.e the dimension after processing
131
+ input audios with [`SeamlessM4TFeatureExtractor`].
132
+ adaptor_kernel_size (`int`, *optional*, defaults to 8):
133
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
134
+ adaptor_stride (`int`, *optional*, defaults to 8):
135
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
136
+ adaptor_dropout (`float`, *optional*, defaults to 0.1):
137
+ The dropout probability for all layers in the speech adapter.
138
+ num_adapter_layers (`int`, *optional*, defaults to 1):
139
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
140
+ True`.
141
+ position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`):
142
+ Can be specified to `relative_key`. If left to `None`, no relative position embedding is applied. Only
143
+ applied to the speech encoder. For more information on `"relative_key"`, please refer to [Self-Attention
144
+ with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
145
+ conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
146
+ Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
147
+ left_max_position_embeddings (`int`, *optional*, defaults to 64):
148
+ The left clipping value for relative positions.
149
+ right_max_position_embeddings (`int`, *optional*, defaults to 8):
150
+ The right clipping value for relative positions.
151
+ speech_encoder_chunk_size (`int`, *optional*, defaults to 20000): The size of each attention chunk.
152
+ speech_encoder_left_chunk_num (`int`, *optional*, defaults to 128):
153
+ Number of chunks on the left up to which lookahead is allowed.
154
+
155
+ > Text-To-Unit (t2u) model specific parameters
156
+
157
+ t2u_bos_token_id (`int`, *optional*, defaults to 0):
158
+ The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
159
+ t2u_pad_token_id (`int`, *optional*, defaults to 1):
160
+ The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
161
+ t2u_eos_token_id (`int`, *optional*, defaults to 2):
162
+ The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
163
+ t2u_encoder_layers (`int`, *optional*, defaults to 6):
164
+ Number of hidden layers in the Transformer text-to-unit encoder.
165
+ t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
166
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
167
+ t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
168
+ Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
169
+ t2u_decoder_layers (`int`, *optional*, defaults to 6):
170
+ Number of hidden layers in the Transformer text-to-unit decoder.
171
+ t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
172
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
173
+ t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
174
+ Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
175
+ t2u_max_position_embeddings (`int`, *optional*, defaults to 4096):
176
+ The maximum sequence length that this model's text-to-unit component might ever be used with. Typically set
177
+ this to something large just in case (e.g., 512 or 1024 or 2048).
178
+ t2u_variance_predictor_embed_dim (`int`, *optional*, defaults to 1024):
179
+ The projection dimension of the text-to-unit's duration predictor.
180
+ t2u_variance_predictor_hidden_dim (`int`, *optional*, defaults to 256):
181
+ Internal dimension of the text-to-unit's duration predictor.
182
+ t2u_variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
183
+ Kernel size of the convolutional layers of the text-to-unit's duration predictor.
184
+ t2u_variance_pred_dropout (`float`, *optional*, defaults to 0.5):
185
+ The dropout probability of the text-to-unit's duration predictor.
186
+
187
+ > Hifi-Gan Vocoder specific parameters
188
+
189
+ sampling_rate (`int`, *optional*, defaults to 16000):
190
+ The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
191
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
192
+ The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
193
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
194
+ A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
195
+ The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
196
+ *upsample_kernel_sizes*. Applies to the vocoder only.
197
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
198
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
199
+ network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
200
+ the length of *upsample_rates*. Applies to the vocoder only.
201
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
202
+ A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
203
+ field fusion (MRF) module. Applies to the vocoder only.
204
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
205
+ A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
206
+ the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
207
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
208
+ The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
209
+ only.
210
+ unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
211
+ Vocabulary size of the SeamlessM4Tv2 vocoder. Defines the number of different unit tokens that can be
212
+ represented by the `input_ids` passed when calling the vocoder of [`~SeamlessM4Tv2Model`],
213
+ [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
214
+ unit_embed_dim (`int`, *optional*, defaults to 1280):
215
+ The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
216
+ lang_embed_dim (`int`, *optional*, defaults to 256):
217
+ The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
218
+ spkr_embed_dim (`int`, *optional*, defaults to 256):
219
+ The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
220
+ vocoder_num_langs (`int`, *optional*, defaults to 36):
221
+ Number of languages supported by the vocoder. May differ from `t2u_num_langs`.
222
+ vocoder_num_spkrs (`int`, *optional*, defaults to 200):
223
+ Number of speakers supported by the vocoder.
224
+ variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
225
+ Kernel size of the duration predictor. Applies to the vocoder only.
226
+ var_pred_dropout (`float`, *optional*, defaults to 0.5):
227
+ The dropout probability of the duration predictor. Applies to the vocoder only.
228
+ vocoder_offset (`int`, *optional*, defaults to 4):
229
+ Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
230
+
231
+ ```python
232
+ >>> from transformers import SeamlessM4Tv2Model, SeamlessM4Tv2Config
233
+
234
+ >>> # Initializing a SeamlessM4Tv2 configuration
235
+ >>> configuration = SeamlessM4Tv2Config()
236
+
237
+ >>> # Initializing a model from the configuration
238
+ >>> model = SeamlessM4Tv2Model(configuration)
239
+
240
+ >>> # Accessing the model configuration
241
+ >>> configuration = model.config
242
+ ```"""
243
+
244
+ model_type = "seamless_m4t_v2"
245
+
246
+ def __init__(
247
+ self,
248
+ vocab_size=256102,
249
+ t2u_vocab_size=10082,
250
+ char_vocab_size=10943,
251
+ # shared config
252
+ hidden_size=1024,
253
+ initializer_range=0.02,
254
+ layer_norm_eps=1e-5,
255
+ use_cache=True,
256
+ max_position_embeddings=4096,
257
+ is_encoder_decoder=True,
258
+ encoder_layerdrop=0.05,
259
+ decoder_layerdrop=0.05,
260
+ activation_function="relu",
261
+ dropout=0.1,
262
+ attention_dropout=0.1,
263
+ activation_dropout=0.0,
264
+ scale_embedding=True,
265
+ # text encoder|decoder
266
+ encoder_layers=24,
267
+ encoder_ffn_dim=8192,
268
+ encoder_attention_heads=16,
269
+ decoder_layers=24,
270
+ decoder_ffn_dim=8192,
271
+ decoder_attention_heads=16,
272
+ decoder_start_token_id=3,
273
+ max_new_tokens=256,
274
+ pad_token_id=0,
275
+ bos_token_id=2,
276
+ eos_token_id=3,
277
+ # speech_encoder
278
+ speech_encoder_layers=24,
279
+ speech_encoder_attention_heads=16,
280
+ speech_encoder_intermediate_size=4096,
281
+ speech_encoder_hidden_act="swish",
282
+ speech_encoder_dropout=0.0,
283
+ add_adapter=True,
284
+ speech_encoder_layerdrop=0.1,
285
+ feature_projection_input_dim=160,
286
+ adaptor_kernel_size=8,
287
+ adaptor_stride=8,
288
+ adaptor_dropout=0.1,
289
+ num_adapter_layers=1,
290
+ position_embeddings_type="relative_key",
291
+ conv_depthwise_kernel_size=31,
292
+ left_max_position_embeddings=64,
293
+ right_max_position_embeddings=8,
294
+ speech_encoder_chunk_size=20000,
295
+ speech_encoder_left_chunk_num=128,
296
+ # t2u config
297
+ t2u_bos_token_id=0,
298
+ t2u_pad_token_id=1,
299
+ t2u_eos_token_id=2,
300
+ t2u_encoder_layers=6,
301
+ t2u_encoder_ffn_dim=8192,
302
+ t2u_encoder_attention_heads=16,
303
+ t2u_decoder_layers=6,
304
+ t2u_decoder_ffn_dim=8192,
305
+ t2u_decoder_attention_heads=16,
306
+ t2u_max_position_embeddings=4096,
307
+ t2u_variance_predictor_embed_dim=1024,
308
+ t2u_variance_predictor_hidden_dim=256,
309
+ t2u_variance_predictor_kernel_size=3,
310
+ t2u_variance_pred_dropout=0.5,
311
+ # hifi-gan vocoder config
312
+ sampling_rate=16000,
313
+ upsample_initial_channel=512,
314
+ upsample_rates=[5, 4, 4, 2, 2],
315
+ upsample_kernel_sizes=[11, 8, 8, 4, 4],
316
+ resblock_kernel_sizes=[3, 7, 11],
317
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
318
+ leaky_relu_slope=0.1,
319
+ # specific to Code Hifi-Gan
320
+ unit_hifi_gan_vocab_size=10000,
321
+ unit_embed_dim=1280,
322
+ lang_embed_dim=256,
323
+ spkr_embed_dim=256,
324
+ vocoder_num_langs=36,
325
+ vocoder_num_spkrs=200,
326
+ variance_predictor_kernel_size=3,
327
+ var_pred_dropout=0.5,
328
+ vocoder_offset=4,
329
+ **kwargs,
330
+ ):
331
+ # overall_config
332
+ self.vocab_size = vocab_size
333
+ self.t2u_vocab_size = t2u_vocab_size
334
+ self.char_vocab_size = char_vocab_size
335
+ self.hidden_size = hidden_size
336
+ self.initializer_range = initializer_range
337
+ self.layer_norm_eps = layer_norm_eps
338
+ self.max_position_embeddings = max_position_embeddings
339
+ self.use_cache = use_cache
340
+ self.max_new_tokens = max_new_tokens
341
+ self.encoder_layerdrop = encoder_layerdrop
342
+ self.decoder_layerdrop = decoder_layerdrop
343
+ self.activation_function = activation_function
344
+ self.dropout = dropout
345
+ self.attention_dropout = attention_dropout
346
+ self.activation_dropout = activation_dropout
347
+ self.scale_embedding = scale_embedding
348
+ # for proper config init
349
+ self.num_attention_heads = decoder_attention_heads
350
+ self.num_hidden_layers = decoder_layers
351
+
352
+ # text|unit encoder|decoder
353
+ self.encoder_layers = encoder_layers
354
+ self.encoder_ffn_dim = encoder_ffn_dim
355
+ self.encoder_attention_heads = encoder_attention_heads
356
+ self.decoder_layers = decoder_layers
357
+ self.decoder_ffn_dim = decoder_ffn_dim
358
+ self.decoder_attention_heads = decoder_attention_heads
359
+
360
+ # speech_encoder
361
+ self.speech_encoder_layers = speech_encoder_layers
362
+ self.speech_encoder_hidden_act = speech_encoder_hidden_act
363
+ self.speech_encoder_dropout = speech_encoder_dropout
364
+ self.speech_encoder_attention_heads = speech_encoder_attention_heads
365
+ self.speech_encoder_layerdrop = speech_encoder_layerdrop
366
+ self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
367
+ self.feature_projection_input_dim = feature_projection_input_dim
368
+ self.adaptor_kernel_size = adaptor_kernel_size
369
+ self.adaptor_stride = adaptor_stride
370
+ self.adaptor_dropout = adaptor_dropout
371
+ self.num_adapter_layers = num_adapter_layers
372
+ self.position_embeddings_type = position_embeddings_type
373
+ self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
374
+ self.add_adapter = add_adapter
375
+ self.left_max_position_embeddings = left_max_position_embeddings
376
+ self.right_max_position_embeddings = right_max_position_embeddings
377
+ self.speech_encoder_chunk_size = speech_encoder_chunk_size
378
+ self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num
379
+
380
+ # t2u config
381
+ self.t2u_bos_token_id = t2u_bos_token_id
382
+ self.t2u_pad_token_id = t2u_pad_token_id
383
+ self.t2u_eos_token_id = t2u_eos_token_id
384
+ self.t2u_encoder_layers = t2u_encoder_layers
385
+ self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
386
+ self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
387
+ self.t2u_decoder_layers = t2u_decoder_layers
388
+ self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
389
+ self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
390
+ self.t2u_max_position_embeddings = t2u_max_position_embeddings
391
+ self.t2u_variance_predictor_embed_dim = t2u_variance_predictor_embed_dim
392
+ self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim
393
+ self.t2u_variance_predictor_kernel_size = t2u_variance_predictor_kernel_size
394
+ self.t2u_variance_pred_dropout = t2u_variance_pred_dropout
395
+
396
+ # hifi-gan vocoder config
397
+ # original parameters specific to Hifi-Gan
398
+ self.sampling_rate = sampling_rate
399
+ self.upsample_initial_channel = upsample_initial_channel
400
+ self.upsample_rates = upsample_rates
401
+ self.upsample_kernel_sizes = upsample_kernel_sizes
402
+ self.resblock_kernel_sizes = resblock_kernel_sizes
403
+ self.resblock_dilation_sizes = resblock_dilation_sizes
404
+ self.leaky_relu_slope = leaky_relu_slope
405
+
406
+ # specific to Code Hifi-Gan
407
+ self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
408
+ self.unit_embed_dim = unit_embed_dim
409
+ self.lang_embed_dim = lang_embed_dim
410
+ self.spkr_embed_dim = spkr_embed_dim
411
+ self.vocoder_num_langs = vocoder_num_langs
412
+ self.vocoder_num_spkrs = vocoder_num_spkrs
413
+ self.variance_predictor_kernel_size = variance_predictor_kernel_size
414
+ self.var_pred_dropout = var_pred_dropout
415
+ self.vocoder_offset = vocoder_offset
416
+
417
+ super().__init__(
418
+ pad_token_id=pad_token_id,
419
+ bos_token_id=bos_token_id,
420
+ eos_token_id=eos_token_id,
421
+ decoder_start_token_id=decoder_start_token_id,
422
+ is_encoder_decoder=is_encoder_decoder,
423
+ max_position_embeddings=max_position_embeddings,
424
+ **kwargs,
425
+ )
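The example docstring above only shows the default configuration. As a purely hypothetical illustration of the keyword arguments documented above (not part of the original file; the small values are arbitrary), a down-scaled config could be built like this:

```python
>>> from transformers import SeamlessM4Tv2Config

>>> # Illustrative values only; every keyword mirrors the __init__ signature above.
>>> small_config = SeamlessM4Tv2Config(
...     encoder_layers=2,
...     decoder_layers=2,
...     speech_encoder_layers=2,
...     t2u_encoder_layers=1,
...     t2u_decoder_layers=1,
... )
>>> small_config.num_hidden_layers  # mirrored from decoder_layers in __init__
2
```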
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright 2024 Stability AI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_stablelm": ["STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP", "StableLmConfig"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_stablelm"] = [
35
+ "StableLmForCausalLM",
36
+ "StableLmModel",
37
+ "StableLmPreTrainedModel",
38
+ "StableLmForSequenceClassification",
39
+ ]
40
+
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_stablelm import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP, StableLmConfig
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .modeling_stablelm import (
52
+ StableLmForCausalLM,
53
+ StableLmForSequenceClassification,
54
+ StableLmModel,
55
+ StableLmPreTrainedModel,
56
+ )
57
+
58
+
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
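Note that the `_LazyModule` indirection above defers importing the heavy `modeling_stablelm` module until one of its symbols is first accessed. A rough sketch of the resulting behaviour, assuming `torch` is installed:

```python
>>> from transformers.models import stablelm

>>> # The configuration class is always importable.
>>> stablelm.StableLmConfig.model_type
'stablelm'

>>> # The modeling classes resolve lazily, only on first attribute access.
>>> stablelm.StableLmForCausalLM.__module__
'transformers.models.stablelm.modeling_stablelm'
```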
llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (950 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__pycache__/configuration_stablelm.cpython-310.pyc ADDED
Binary file (8.07 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/__pycache__/modeling_stablelm.cpython-310.pyc ADDED
Binary file (40.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/configuration_stablelm.py ADDED
@@ -0,0 +1,189 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Stability AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ StableLM model configuration """
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class StableLmConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`~StableLmModel`].
30
+ It is used to instantiate a StableLM model according to the specified arguments, defining the model
31
+ architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
32
+ the StableLM [stabilityai/stablelm-3b-4e1t](https://huggingface.co/stabilityai/stablelm-3b-4e1t) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used
35
+ to control the model outputs. Read the documentation from [`PretrainedConfig`]
36
+ for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 50304):
41
+ Vocabulary size of the StableLM model. Defines the number of different tokens that
42
+ can be represented by the `input_ids` passed when calling [`StableLmModel`].
43
+ intermediate_size (`int`, *optional*, defaults to 6912):
44
+ Dimension of the MLP representations.
45
+ hidden_size (`int`, *optional*, defaults to 2560):
46
+ Dimension of the hidden representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the Transformer decoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 32):
50
+ Number of attention heads for each attention layer in the Transformer decoder.
51
+ num_key_value_heads (`int`, *optional*, defaults to 32):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by mean-pooling all the original heads within that group. For more details, check out [this
57
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
58
+ `num_attention_heads`.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
60
+ The non-linear activation function (function or string).
61
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
62
+ The maximum sequence length that this model might ever be used with.
63
+ Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing
66
+ all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
68
+ The epsilon used by the normalization layers.
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions
71
+ (not used by all models). Only relevant if `config.is_decoder=True`.
72
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
73
+ Whether the model's input and output word embeddings should be tied.
74
+ rope_theta (`float`, *optional*, defaults to `10000.0`):
75
+ The base period of the RoPE embeddings.
76
+ rope_scaling (`Dict`, *optional*):
77
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
78
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
79
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
80
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
81
+ these scaling strategies behave:
82
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
83
+ is an experimental feature, subject to breaking API changes in future versions.
84
+ use_qkv_bias (`bool`, *optional*, defaults to `False`):
85
+ Whether or not the model should use bias for qkv layers.
86
+ qk_layernorm (`bool`, *optional*, defaults to `False`):
87
+ Whether or not to normalize, per head, the Queries and Keys after projecting the hidden states.
88
+ use_parallel_residual (`bool`, *optional*, defaults to `False`):
89
+ Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training
90
+ speedup at large scales.
91
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
92
+ The dropout ratio after applying the MLP to the hidden states.
93
+ attention_dropout (`float`, *optional*, defaults to 0.0):
94
+ The dropout ratio for the attention probabilities.
95
+ partial_rotary_factor (`float`, *optional*, defaults to 0.25):
96
+ Percentage of the query and keys which will have rotary embedding.
97
+ bos_token_id (`int`, *optional*, defaults to 0):
98
+ The id of the `BOS` token in the vocabulary.
99
+ eos_token_id (`int`, *optional*, defaults to 0):
100
+ The id of the `EOS` token in the vocabulary.
101
+
102
+ Example:
103
+
104
+ ```python
105
+ >>> from transformers import StableLmModel, StableLmConfig
106
+
107
+ >>> # Initializing a StableLM stablelm-3b style configuration
108
+ >>> configuration = StableLmConfig()
109
+ ```"""
110
+
111
+ model_type = "stablelm"
112
+ keys_to_ignore_at_inference = ["past_key_values"]
113
+
114
+ def __init__(
115
+ self,
116
+ vocab_size=50304,
117
+ intermediate_size=6912,
118
+ hidden_size=2560,
119
+ num_hidden_layers=32,
120
+ num_attention_heads=32,
121
+ num_key_value_heads=32,
122
+ hidden_act="silu",
123
+ max_position_embeddings=4096,
124
+ initializer_range=0.02,
125
+ layer_norm_eps=1.0e-5,
126
+ use_cache=True,
127
+ tie_word_embeddings=False,
128
+ rope_theta=10_000,
129
+ rope_scaling=None,
130
+ use_qkv_bias=False,
131
+ qk_layernorm=False,
132
+ use_parallel_residual=False,
133
+ hidden_dropout=0.0,
134
+ attention_dropout=0.0,
135
+ partial_rotary_factor=0.25,
136
+ bos_token_id=0,
137
+ eos_token_id=0,
138
+ **kwargs,
139
+ ):
140
+ self.vocab_size = vocab_size
141
+ self.max_position_embeddings = max_position_embeddings
142
+
143
+ self.hidden_size = hidden_size
144
+ self.intermediate_size = intermediate_size
145
+ self.num_hidden_layers = num_hidden_layers
146
+ self.num_attention_heads = num_attention_heads
147
+ self.num_key_value_heads = num_key_value_heads
148
+ self.hidden_act = hidden_act
149
+
150
+ self.initializer_range = initializer_range
151
+ self.layer_norm_eps = layer_norm_eps
152
+ self.use_cache = use_cache
153
+ self.rope_theta = rope_theta
154
+ self.rope_scaling = rope_scaling
155
+ self.use_qkv_bias = use_qkv_bias
156
+ self.qk_layernorm = qk_layernorm
157
+ self.use_parallel_residual = use_parallel_residual
158
+ self.hidden_dropout = hidden_dropout
159
+ self.attention_dropout = attention_dropout
160
+ self.partial_rotary_factor = partial_rotary_factor
161
+ self._rope_scaling_validation()
162
+
163
+ super().__init__(
164
+ bos_token_id=bos_token_id,
165
+ eos_token_id=eos_token_id,
166
+ tie_word_embeddings=tie_word_embeddings,
167
+ **kwargs,
168
+ )
169
+
170
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
171
+ def _rope_scaling_validation(self):
172
+ """
173
+ Validate the `rope_scaling` configuration.
174
+ """
175
+ if self.rope_scaling is None:
176
+ return
177
+
178
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
179
+ raise ValueError(
180
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
181
+ )
182
+ rope_scaling_type = self.rope_scaling.get("type", None)
183
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
184
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
185
+ raise ValueError(
186
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
187
+ )
188
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
189
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
llmeval-env/lib/python3.10/site-packages/transformers/models/stablelm/modeling_stablelm.py ADDED
@@ -0,0 +1,1385 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch StableLM model."""
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+
30
+ from ...activations import ACT2FN
31
+ from ...cache_utils import Cache, DynamicCache
32
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
33
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ is_flash_attn_2_available,
39
+ is_flash_attn_greater_or_equal_2_10,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_stablelm import StableLmConfig
44
+
45
+
46
+ if is_flash_attn_2_available():
47
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
48
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CONFIG_FOR_DOC = "StableLmConfig"
54
+
55
+
56
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
57
+ def _get_unpad_data(attention_mask):
58
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
59
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
60
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
61
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
62
+ return (
63
+ indices,
64
+ cu_seqlens,
65
+ max_seqlen_in_batch,
66
+ )
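For orientation, here is a small worked example (not part of the original file) of what `_get_unpad_data` returns for a toy padding mask, assuming the helper above is in scope:

```python
>>> import torch

>>> # First sequence has 2 real tokens, second has 3 (0 marks padding).
>>> attention_mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
>>> indices, cu_seqlens, max_seqlen_in_batch = _get_unpad_data(attention_mask)
>>> indices  # flat positions of the non-padding tokens
tensor([0, 1, 3, 4, 5])
>>> cu_seqlens  # cumulative sequence lengths with a leading zero
tensor([0, 2, 5], dtype=torch.int32)
>>> max_seqlen_in_batch
3
```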
67
+
68
+
69
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->StableLm
70
+ class StableLmRotaryEmbedding(nn.Module):
71
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
72
+ super().__init__()
73
+
74
+ self.dim = dim
75
+ self.max_position_embeddings = max_position_embeddings
76
+ self.base = base
77
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
78
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
79
+
80
+ # Build here to make `torch.jit.trace` work.
81
+ self._set_cos_sin_cache(
82
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
83
+ )
84
+
85
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
86
+ self.max_seq_len_cached = seq_len
87
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
88
+
89
+ freqs = torch.outer(t, self.inv_freq)
90
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
91
+ emb = torch.cat((freqs, freqs), dim=-1)
92
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
93
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
94
+
95
+ def forward(self, x, seq_len=None):
96
+ # x: [bs, num_attention_heads, seq_len, head_size]
97
+ if seq_len > self.max_seq_len_cached:
98
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
99
+
100
+ return (
101
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
102
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
103
+ )
104
+
105
+
106
+ # Copied from transformers.models.falcon.modeling_falcon.FalconLinearScalingRotaryEmbedding with Falcon->StableLm
107
+ class StableLmLinearScalingRotaryEmbedding(StableLmRotaryEmbedding):
108
+ """StableLmRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
109
+
110
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
111
+ self.scaling_factor = scaling_factor
112
+ super().__init__(dim, max_position_embeddings, base, device)
113
+
114
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
115
+ self.max_seq_len_cached = seq_len
116
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
117
+ t = t / self.scaling_factor
118
+
119
+ freqs = torch.outer(t, self.inv_freq)
120
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
121
+ emb = torch.cat((freqs, freqs), dim=-1)
122
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
123
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
124
+
125
+
126
+ # Copied from transformers.models.falcon.modeling_falcon.FalconDynamicNTKScalingRotaryEmbedding with Falcon->StableLm
127
+ class StableLmDynamicNTKScalingRotaryEmbedding(StableLmRotaryEmbedding):
128
+ """StableLmRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
129
+
130
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
131
+ self.scaling_factor = scaling_factor
132
+ super().__init__(dim, max_position_embeddings, base, device)
133
+
134
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
135
+ self.max_seq_len_cached = seq_len
136
+
137
+ if seq_len > self.max_position_embeddings:
138
+ base = self.base * (
139
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
140
+ ) ** (self.dim / (self.dim - 2))
141
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
142
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
143
+
144
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
145
+
146
+ freqs = torch.outer(t, self.inv_freq)
147
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
148
+ emb = torch.cat((freqs, freqs), dim=-1)
149
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
150
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
151
+
152
+
153
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
154
+ def rotate_half(x):
155
+ """Rotates half the hidden dims of the input."""
156
+ x1 = x[..., : x.shape[-1] // 2]
157
+ x2 = x[..., x.shape[-1] // 2 :]
158
+ return torch.cat((-x2, x1), dim=-1)
159
+
160
+
161
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
162
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
163
+ """Applies Rotary Position Embedding to the query and key tensors.
164
+
165
+ Args:
166
+ q (`torch.Tensor`): The query tensor.
167
+ k (`torch.Tensor`): The key tensor.
168
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
169
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
170
+ position_ids (`torch.Tensor`):
171
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
172
+ used to pass offset position ids when working with a KV-cache.
173
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
174
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
175
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
176
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
177
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
178
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
179
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
180
+ Returns:
181
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
182
+ """
183
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
184
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
185
+ q_embed = (q * cos) + (rotate_half(q) * sin)
186
+ k_embed = (k * cos) + (rotate_half(k) * sin)
187
+ return q_embed, k_embed
188
+
189
+
190
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->StableLm
191
+ class StableLmMLP(nn.Module):
192
+ def __init__(self, config):
193
+ super().__init__()
194
+ self.config = config
195
+ self.hidden_size = config.hidden_size
196
+ self.intermediate_size = config.intermediate_size
197
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
198
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
199
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
200
+ self.act_fn = ACT2FN[config.hidden_act]
201
+
202
+ def forward(self, x):
203
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
204
+
205
+
206
+ class StableLmLayerNormPerHead(nn.Module):
207
+ def __init__(self, dim, num_heads, eps=1e-5, bias=False):
208
+ super().__init__()
209
+ self.dim = dim
210
+ self.num_heads = num_heads
211
+ self.norms = nn.ModuleList([nn.LayerNorm(dim, eps=eps, bias=bias) for _ in range(self.num_heads)])
212
+
213
+ def forward(self, hidden_states: torch.Tensor):
214
+ # Split along the num_heads axis to get per-head inputs
215
+ # [batch_size, num_heads, seq_len, head_dim] -> [batch_size, 1, seq_len, head_dim] * num_heads
216
+ states_per_heads = torch.split(hidden_states, 1, dim=1)
217
+ # Normalize and merge the heads back together
218
+ return torch.cat([norm(hidden_states) for norm, hidden_states in zip(self.norms, states_per_heads)], dim=1)
219
+
220
+
221
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
222
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
223
+ """
224
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
225
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
226
+ """
227
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
228
+ if n_rep == 1:
229
+ return hidden_states
230
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
231
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
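A tiny shape check (not part of the original file) of the grouped-query expansion `repeat_kv` performs, assuming the function above is in scope:

```python
>>> import torch

>>> # 1 batch, 2 key/value heads, 3 positions, head_dim 4, expanded for 6 query heads.
>>> kv = torch.randn(1, 2, 3, 4)
>>> repeat_kv(kv, n_rep=3).shape
torch.Size([1, 6, 3, 4])

>>> # Matches torch.repeat_interleave along the head dimension, as the docstring states.
>>> torch.equal(repeat_kv(kv, 3), kv.repeat_interleave(3, dim=1))
True
```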
232
+
233
+
234
+ class StableLmAttention(nn.Module):
235
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
236
+
237
+ def __init__(self, config: StableLmConfig, layer_idx: Optional[int] = None):
238
+ super().__init__()
239
+ self.config = config
240
+ self.layer_idx = layer_idx
241
+ if layer_idx is None:
242
+ logger.warning_once(
243
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
244
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
245
+ "when creating this class."
246
+ )
247
+
248
+ self.hidden_size = config.hidden_size
249
+ self.num_heads = config.num_attention_heads
250
+ self.head_dim = self.hidden_size // self.num_heads
251
+ self.num_key_value_heads = config.num_key_value_heads
252
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
253
+ self.max_position_embeddings = config.max_position_embeddings
254
+ self.rope_theta = config.rope_theta
255
+ self.partial_rotary_factor = config.partial_rotary_factor
256
+ self.is_causal = True
257
+
258
+ if (self.head_dim * self.num_heads) != self.hidden_size:
259
+ raise ValueError(
260
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
261
+ f" and `num_heads`: {self.num_heads})."
262
+ )
263
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.use_qkv_bias)
264
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_qkv_bias)
265
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_qkv_bias)
266
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
267
+
268
+ self.qk_layernorm = config.qk_layernorm
269
+ if self.qk_layernorm:
270
+ self.q_layernorm = StableLmLayerNormPerHead(self.head_dim, self.num_heads, eps=config.layer_norm_eps)
271
+ self.k_layernorm = StableLmLayerNormPerHead(
272
+ self.head_dim, self.num_key_value_heads, eps=config.layer_norm_eps
273
+ )
274
+
275
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
276
+ self._init_rope()
277
+
278
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonAttention._init_rope with Persimmon->StableLm
279
+ def _init_rope(self):
280
+ if self.config.rope_scaling is None:
281
+ self.rotary_emb = StableLmRotaryEmbedding(
282
+ int(self.partial_rotary_factor * self.head_dim),
283
+ max_position_embeddings=self.max_position_embeddings,
284
+ base=self.rope_theta,
285
+ )
286
+ else:
287
+ scaling_type = self.config.rope_scaling["type"]
288
+ scaling_factor = self.config.rope_scaling["factor"]
289
+ if scaling_type == "linear":
290
+ self.rotary_emb = StableLmLinearScalingRotaryEmbedding(
291
+ int(self.partial_rotary_factor * self.head_dim),
292
+ max_position_embeddings=self.max_position_embeddings,
293
+ scaling_factor=scaling_factor,
294
+ base=self.rope_theta,
295
+ )
296
+ elif scaling_type == "dynamic":
297
+ self.rotary_emb = StableLmDynamicNTKScalingRotaryEmbedding(
298
+ int(self.partial_rotary_factor * self.head_dim),
299
+ max_position_embeddings=self.max_position_embeddings,
300
+ scaling_factor=scaling_factor,
301
+ base=self.rope_theta,
302
+ )
303
+ else:
304
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
305
+
306
+ def forward(
307
+ self,
308
+ hidden_states: torch.Tensor,
309
+ attention_mask: Optional[torch.Tensor] = None,
310
+ position_ids: Optional[torch.LongTensor] = None,
311
+ past_key_value: Optional[Cache] = None,
312
+ output_attentions: bool = False,
313
+ use_cache: bool = False,
314
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
315
+ bsz, q_len, _ = hidden_states.size()
316
+
317
+ query_states = self.q_proj(hidden_states)
318
+ key_states = self.k_proj(hidden_states)
319
+ value_states = self.v_proj(hidden_states)
320
+
321
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
322
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
323
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
324
+
325
+ if self.qk_layernorm:
326
+ query_states = self.q_layernorm(query_states)
327
+ key_states = self.k_layernorm(key_states)
328
+
329
+ kv_seq_len = key_states.shape[-2]
330
+ if past_key_value is not None:
331
+ if self.layer_idx is None:
332
+ raise ValueError(
333
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
334
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
335
+ "with a layer index."
336
+ )
337
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
338
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
339
+
340
+ # Partial rotary embedding
341
+ query_rot, query_pass = (
342
+ query_states[..., : self.rotary_emb.dim],
343
+ query_states[..., self.rotary_emb.dim :],
344
+ )
345
+ key_rot, key_pass = (
346
+ key_states[..., : self.rotary_emb.dim],
347
+ key_states[..., self.rotary_emb.dim :],
348
+ )
349
+ # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
350
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
351
+
352
+ # [batch_size, seq_length, num_heads, head_dim]
353
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
354
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
355
+
356
+ if past_key_value is not None:
357
+ # Specific to RoPE models with partial rotation
358
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
359
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
360
+
361
+ # Repeat k/v heads if n_kv_heads < n_heads
362
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
363
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
364
+
365
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
366
+
367
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
368
+ raise ValueError(
369
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
370
+ f" {attn_weights.size()}"
371
+ )
372
+
373
+ if attention_mask is not None:
374
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
375
+ raise ValueError(
376
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
377
+ )
378
+ attn_weights = attn_weights + attention_mask
379
+
380
+ # upcast attention to fp32
381
+ attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype)
382
+ attn_weights = self.attention_dropout(attn_weights)
383
+
384
+ attn_output = torch.matmul(attn_weights, value_states)
385
+
386
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
387
+ raise ValueError(
388
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
389
+ f" {attn_output.size()}"
390
+ )
391
+
392
+ attn_output = attn_output.transpose(1, 2).contiguous()
393
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
394
+
395
+ attn_output = self.o_proj(attn_output)
396
+
397
+ if not output_attentions:
398
+ attn_weights = None
399
+
400
+ return attn_output, attn_weights, past_key_value
401
+
402
+
403
+ class StableLmSdpaAttention(StableLmAttention):
404
+ def forward(
405
+ self,
406
+ hidden_states: torch.Tensor,
407
+ attention_mask: Optional[torch.Tensor] = None,
408
+ position_ids: Optional[torch.LongTensor] = None,
409
+ past_key_value: Optional[Cache] = None,
410
+ output_attentions: bool = False,
411
+ use_cache: bool = False,
412
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
413
+ if output_attentions:
414
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
415
+ logger.warning_once(
416
+ "StableLmModel is using StableLmSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
417
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
418
+ )
419
+ return super().forward(
420
+ hidden_states=hidden_states,
421
+ attention_mask=attention_mask,
422
+ position_ids=position_ids,
423
+ past_key_value=past_key_value,
424
+ output_attentions=output_attentions,
425
+ use_cache=use_cache,
426
+ )
427
+
428
+ bsz, q_len, _ = hidden_states.size()
429
+
430
+ query_states = self.q_proj(hidden_states)
431
+ key_states = self.k_proj(hidden_states)
432
+ value_states = self.v_proj(hidden_states)
433
+
434
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
435
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
436
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
437
+
438
+ if self.qk_layernorm:
439
+ query_states = self.q_layernorm(query_states)
440
+ key_states = self.k_layernorm(key_states)
441
+
442
+ kv_seq_len = key_states.shape[-2]
443
+ if past_key_value is not None:
444
+ if self.layer_idx is None:
445
+ raise ValueError(
446
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
447
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
448
+ "with a layer index."
449
+ )
450
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
451
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
452
+
453
+ # Partial rotary embedding
454
+ query_rot, query_pass = (
455
+ query_states[..., : self.rotary_emb.dim],
456
+ query_states[..., self.rotary_emb.dim :],
457
+ )
458
+ key_rot, key_pass = (
459
+ key_states[..., : self.rotary_emb.dim],
460
+ key_states[..., self.rotary_emb.dim :],
461
+ )
462
+ # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
463
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
464
+
465
+ # [batch_size, seq_length, num_heads, head_dim]
466
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
467
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
468
+
469
+ if past_key_value is not None:
470
+ # Specific to RoPE models with partial rotation
471
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
472
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
473
+
474
+ # Repeat k/v heads if n_kv_heads < n_heads
475
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
476
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
477
+
478
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
479
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
480
+ if query_states.device.type == "cuda" and attention_mask is not None:
481
+ query_states = query_states.contiguous()
482
+ key_states = key_states.contiguous()
483
+ value_states = value_states.contiguous()
484
+
485
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
486
+ query_states,
487
+ key_states,
488
+ value_states,
489
+ attn_mask=attention_mask,
490
+ dropout_p=self.attention_dropout.p if self.training else 0.0,
491
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
492
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
493
+ )
494
+
495
+ attn_output = attn_output.transpose(1, 2).contiguous()
496
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
497
+
498
+ attn_output = self.o_proj(attn_output)
499
+
500
+ return attn_output, None, past_key_value
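For reference, a minimal sketch (illustrative, not from the file) of the core `torch.nn.functional.scaled_dot_product_attention` call that the SDPA path above delegates to, using toy tensors in the same [batch, heads, seq_len, head_dim] layout:

```python
>>> import torch

>>> q = torch.randn(1, 2, 5, 4)
>>> k = torch.randn(1, 2, 5, 4)
>>> v = torch.randn(1, 2, 5, 4)

>>> # With no explicit mask and q_len > 1, the module relies on is_causal=True, as above.
>>> out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, is_causal=True)
>>> out.shape
torch.Size([1, 2, 5, 4])
```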
501
+
502
+
503
+ class StableLmFlashAttention2(StableLmAttention):
504
+ """
505
+ StableLM flash attention module. This module inherits from `StableLmAttention`, as the weights of the module stay
506
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
507
+ flash attention and deal with padding tokens in case the input contains any of them.
508
+ """
509
+
510
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
511
+ def __init__(self, *args, **kwargs):
512
+ super().__init__(*args, **kwargs)
513
+
514
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
515
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
516
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
517
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
518
+
519
+ def forward(
520
+ self,
521
+ hidden_states: torch.Tensor,
522
+ attention_mask: Optional[torch.LongTensor] = None,
523
+ position_ids: Optional[torch.LongTensor] = None,
524
+ past_key_value: Optional[Cache] = None,
525
+ output_attentions: bool = False,
526
+ use_cache: bool = False,
527
+ **kwargs,
528
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
529
+ # StableLmFlashAttention2 attention does not support output_attentions
530
+
531
+ output_attentions = False
532
+
533
+ bsz, q_len, _ = hidden_states.size()
534
+
535
+ query_states = self.q_proj(hidden_states)
536
+ key_states = self.k_proj(hidden_states)
537
+ value_states = self.v_proj(hidden_states)
538
+
539
+ # Flash attention requires the input to have the shape
540
+ # batch_size x seq_length x head_dim x hidden_dim
541
+ # therefore we just need to keep the original shape
542
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
543
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
544
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
545
+
546
+ if self.qk_layernorm:
547
+ query_states = self.q_layernorm(query_states)
548
+ key_states = self.k_layernorm(key_states)
549
+
550
+ kv_seq_len = key_states.shape[-2]
551
+ if past_key_value is not None:
552
+ if self.layer_idx is None:
553
+ raise ValueError(
554
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
555
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
556
+ "with a layer index."
557
+ )
558
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
559
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
560
+
561
+ # Partial rotary embedding
562
+ query_rot, query_pass = (
563
+ query_states[..., : self.rotary_emb.dim],
564
+ query_states[..., self.rotary_emb.dim :],
565
+ )
566
+ key_rot, key_pass = (
567
+ key_states[..., : self.rotary_emb.dim],
568
+ key_states[..., self.rotary_emb.dim :],
569
+ )
570
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
571
+
572
+ # [batch_size, seq_length, num_heads, head_dim]
573
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
574
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
575
+
576
+ if past_key_value is not None:
577
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
578
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
579
+
580
+ # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
581
+ # to be able to avoid many of these transpose/reshape/view.
582
+ query_states = query_states.transpose(1, 2)
583
+ key_states = key_states.transpose(1, 2)
584
+ value_states = value_states.transpose(1, 2)
585
+
586
+ dropout_rate = self.attention_dropout.p if self.training else 0.0
587
+
588
+ attn_output = self._flash_attention_forward(
589
+ query_states,
590
+ key_states,
591
+ value_states,
592
+ attention_mask,
593
+ q_len,
594
+ dropout=dropout_rate,
595
+ )
596
+
597
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
598
+ attn_output = self.o_proj(attn_output)
599
+
600
+ if not output_attentions:
601
+ attn_weights = None
602
+
603
+ return attn_output, attn_weights, past_key_value
604
+
605
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
606
+ def _flash_attention_forward(
607
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
608
+ ):
609
+ """
610
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
611
+ the input is first unpadded, then the attention scores are computed, and finally the output is padded back.
612
+
613
+ Args:
614
+ query_states (`torch.Tensor`):
615
+ Input query states to be passed to Flash Attention API
616
+ key_states (`torch.Tensor`):
617
+ Input key states to be passed to Flash Attention API
618
+ value_states (`torch.Tensor`):
619
+ Input value states to be passed to Flash Attention API
620
+ attention_mask (`torch.Tensor`):
621
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
622
+ position of padding tokens and 1 for the position of non-padding tokens.
623
+ dropout (`float`):
624
+ Attention dropout
625
+ softmax_scale (`float`, *optional*):
626
+ The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`.
627
+ """
628
+ if not self._flash_attn_uses_top_left_mask:
629
+ causal = self.is_causal
630
+ else:
631
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
632
+ causal = self.is_causal and query_length != 1
633
+
634
+ # Contains at least one padding token in the sequence
635
+ if attention_mask is not None:
636
+ batch_size = query_states.shape[0]
637
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
638
+ query_states, key_states, value_states, attention_mask, query_length
639
+ )
640
+
641
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
642
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
643
+
644
+ attn_output_unpad = flash_attn_varlen_func(
645
+ query_states,
646
+ key_states,
647
+ value_states,
648
+ cu_seqlens_q=cu_seqlens_q,
649
+ cu_seqlens_k=cu_seqlens_k,
650
+ max_seqlen_q=max_seqlen_in_batch_q,
651
+ max_seqlen_k=max_seqlen_in_batch_k,
652
+ dropout_p=dropout,
653
+ softmax_scale=softmax_scale,
654
+ causal=causal,
655
+ )
656
+
657
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
658
+ else:
659
+ attn_output = flash_attn_func(
660
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
661
+ )
662
+
663
+ return attn_output
664
+
665
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
666
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
667
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
668
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
669
+
670
+ key_layer = index_first_axis(
671
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
672
+ )
673
+ value_layer = index_first_axis(
674
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
675
+ )
676
+ if query_length == kv_seq_len:
677
+ query_layer = index_first_axis(
678
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
679
+ )
680
+ cu_seqlens_q = cu_seqlens_k
681
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
682
+ indices_q = indices_k
683
+ elif query_length == 1:
684
+ max_seqlen_in_batch_q = 1
685
+ cu_seqlens_q = torch.arange(
686
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
687
+ ) # There is a memcpy here, which is very bad for performance.
688
+ indices_q = cu_seqlens_q[:-1]
689
+ query_layer = query_layer.squeeze(1)
690
+ else:
691
+ # The -q_len: slice assumes left padding.
692
+ attention_mask = attention_mask[:, -query_length:]
693
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
694
+
695
+ return (
696
+ query_layer,
697
+ key_layer,
698
+ value_layer,
699
+ indices_q,
700
+ (cu_seqlens_q, cu_seqlens_k),
701
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
702
+ )
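To make the unpadding logic above easier to follow, here is a small, self-contained sketch (an editor's illustration, not part of the file) of how the cumulative sequence lengths consumed by `flash_attn_varlen_func` can be derived from a 2D padding mask. The helper name `toy_get_unpad_data` is made up; the real code relies on `_get_unpad_data`, `index_first_axis`, `unpad_input` and `pad_input` from the flash-attn utilities.

```python
import torch

def toy_get_unpad_data(attention_mask: torch.Tensor):
    # attention_mask: (batch_size, seq_len), 1 for real tokens, 0 for padding.
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = int(seqlens_in_batch.max())
    # Exclusive prefix sum: the sequence boundaries in the packed (unpadded) layout.
    cu_seqlens = torch.nn.functional.pad(
        torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)
    )
    return indices, cu_seqlens, max_seqlen_in_batch

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
indices, cu_seqlens, max_len = toy_get_unpad_data(mask)
print(indices.tolist())     # [0, 1, 2, 4, 5]  -> flattened positions of real tokens
print(cu_seqlens.tolist())  # [0, 3, 5]        -> per-sequence offsets in the packed layout
print(max_len)              # 3
```

The packed layout simply concatenates the real tokens of every sequence, and `cu_seqlens` marks where each sequence starts and ends.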
703
+
704
+
705
+ ATTENTION_CLASSES = {
706
+ "eager": StableLmAttention,
707
+ "sdpa": StableLmSdpaAttention,
708
+ "flash_attention_2": StableLmFlashAttention2,
709
+ }
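The mapping above is indexed by `config._attn_implementation`. In user code the backend is normally chosen through `from_pretrained`; the snippet below is an illustrative sketch (it downloads the `stabilityai/stablelm-3b-4e1t` weights referenced elsewhere in this file), not part of the diff.

```python
import torch
from transformers import AutoModelForCausalLM

# The `attn_implementation` argument ends up in `config._attn_implementation`,
# which is the key used to pick a class from ATTENTION_CLASSES above.
model = AutoModelForCausalLM.from_pretrained(
    "stabilityai/stablelm-3b-4e1t",
    torch_dtype=torch.bfloat16,
    attn_implementation="sdpa",  # or "eager" / "flash_attention_2" (needs flash-attn installed)
)
print(type(model.model.layers[0].self_attn).__name__)  # e.g. StableLmSdpaAttention
```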
710
+
711
+
712
+ class StableLmDecoderLayer(nn.Module):
713
+ def __init__(self, config: StableLmConfig, layer_idx: int):
714
+ super().__init__()
715
+ self.use_parallel_residual = config.use_parallel_residual
716
+ self.hidden_size = config.hidden_size
717
+ self.self_attn = ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
718
+ self.mlp = StableLmMLP(config)
719
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
720
+ self.post_attention_layernorm = None
721
+ if not self.use_parallel_residual:
722
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
723
+ self.dropout = nn.Dropout(config.hidden_dropout)
724
+
725
+ def forward(
726
+ self,
727
+ hidden_states: torch.Tensor,
728
+ attention_mask: Optional[torch.Tensor] = None,
729
+ position_ids: Optional[torch.LongTensor] = None,
730
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
731
+ output_attentions: Optional[bool] = False,
732
+ use_cache: Optional[bool] = False,
733
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
734
+ """
735
+ Args:
736
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
737
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
738
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
739
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
740
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
741
+ `[0, config.n_positions - 1]`.
742
+
743
+ [What are position IDs?](../glossary#position-ids)
744
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
745
+ cached past key and value projection states
746
+ output_attentions (`bool`, *optional*):
747
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
748
+ returned tensors for more detail.
749
+ use_cache (`bool`, *optional*):
750
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
751
+ (see `past_key_values`).
752
+ """
753
+
754
+ residual = hidden_states
755
+
756
+ hidden_states = self.input_layernorm(hidden_states)
757
+
758
+ # Self Attention
759
+ self_attn_output, self_attn_weights, present_key_value = self.self_attn(
760
+ hidden_states=hidden_states,
761
+ attention_mask=attention_mask,
762
+ position_ids=position_ids,
763
+ past_key_value=past_key_value,
764
+ output_attentions=output_attentions,
765
+ use_cache=use_cache,
766
+ )
767
+
768
+ # copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXLayer.forward
769
+ if self.use_parallel_residual:
770
+ # x = x + attn(ln1(x)) + mlp(ln1(x))
771
+ # Fully Connected
772
+ mlp_output = self.mlp(hidden_states)
773
+ mlp_output = self.dropout(mlp_output)
774
+ hidden_states = residual + self_attn_output + mlp_output
775
+ else:
776
+ # x = x + attn(ln1(x))
777
+ # x = x + mlp(ln2(x))
778
+ residual = residual + self_attn_output
779
+ # Fully Connected
780
+ mlp_output = self.mlp(self.post_attention_layernorm(residual))
781
+ mlp_output = self.dropout(mlp_output)
782
+ hidden_states = residual + mlp_output
783
+
784
+ outputs = (hidden_states,)
785
+
786
+ if output_attentions:
787
+ outputs += (self_attn_weights,)
788
+
789
+ if use_cache:
790
+ outputs += (present_key_value,)
791
+
792
+ return outputs
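The two residual layouts branched on above can be condensed into a minimal sketch; the `nn.Linear` modules below are stand-ins for the attention and MLP blocks and are chosen purely for illustration.

```python
import torch
from torch import nn

hidden = torch.randn(1, 4, 8)
ln1, ln2 = nn.LayerNorm(8), nn.LayerNorm(8)
attn = nn.Linear(8, 8)  # stand-in for self-attention
mlp = nn.Linear(8, 8)   # stand-in for StableLmMLP

# use_parallel_residual=True:  x + attn(ln1(x)) + mlp(ln1(x))
parallel_out = hidden + attn(ln1(hidden)) + mlp(ln1(hidden))

# use_parallel_residual=False: x = x + attn(ln1(x)); x = x + mlp(ln2(x))
residual = hidden + attn(ln1(hidden))
sequential_out = residual + mlp(ln2(residual))
```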
793
+
794
+
795
+ STABLELM_START_DOCSTRING = r"""
796
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
797
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
798
+ etc.)
799
+
800
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
801
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
802
+ and behavior.
803
+
804
+ Parameters:
805
+ config ([`StableLmConfig`]):
806
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
807
+ load the weights associated with the model, only the configuration. Check out the
808
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
809
+ """
810
+
811
+
812
+ @add_start_docstrings(
813
+ "The bare StableLm Model outputting raw hidden-states without any specific head on top.",
814
+ STABLELM_START_DOCSTRING,
815
+ )
816
+ class StableLmPreTrainedModel(PreTrainedModel):
817
+ config_class = StableLmConfig
818
+ base_model_prefix = "model"
819
+ supports_gradient_checkpointing = True
820
+ _no_split_modules = ["StableLmDecoderLayer"]
821
+ _skip_keys_device_placement = "past_key_values"
822
+ _supports_flash_attn_2 = True
823
+ _supports_cache_class = True
824
+ _supports_sdpa = True
825
+
826
+ def _init_weights(self, module):
827
+ std = self.config.initializer_range
828
+ if isinstance(module, nn.Linear):
829
+ module.weight.data.normal_(mean=0.0, std=std)
830
+ if module.bias is not None:
831
+ module.bias.data.zero_()
832
+ elif isinstance(module, nn.Embedding):
833
+ module.weight.data.normal_(mean=0.0, std=std)
834
+ if module.padding_idx is not None:
835
+ module.weight.data[module.padding_idx].zero_()
836
+
837
+
838
+ STABLELM_INPUTS_DOCSTRING = r"""
839
+ Args:
840
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
841
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
842
+ it.
843
+
844
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
845
+ [`PreTrainedTokenizer.__call__`] for details.
846
+
847
+ [What are input IDs?](../glossary#input-ids)
848
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
849
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
850
+
851
+ - 1 for tokens that are **not masked**,
852
+ - 0 for tokens that are **masked**.
853
+
854
+ [What are attention masks?](../glossary#attention-mask)
855
+
856
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
857
+ [`PreTrainedTokenizer.__call__`] for details.
858
+
859
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
860
+ `past_key_values`).
861
+
862
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
863
+ and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
864
+ information on the default strategy.
865
+
866
+ - 1 indicates the head is **not masked**,
867
+ - 0 indicates the head is **masked**.
868
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
869
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
870
+ config.n_positions - 1]`.
871
+
872
+ [What are position IDs?](../glossary#position-ids)
873
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
874
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
875
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
876
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
877
+
878
+ Two formats are allowed:
879
+ - a [`~cache_utils.Cache`] instance;
880
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
881
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
882
+ cache format.
883
+
884
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
885
+ legacy cache format will be returned.
886
+
887
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
888
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
889
+ of shape `(batch_size, sequence_length)`.
890
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
891
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
892
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
893
+ model's internal embedding lookup matrix.
894
+ use_cache (`bool`, *optional*):
895
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
896
+ `past_key_values`).
897
+ output_attentions (`bool`, *optional*):
898
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
899
+ tensors for more detail.
900
+ output_hidden_states (`bool`, *optional*):
901
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
902
+ more detail.
903
+ return_dict (`bool`, *optional*):
904
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
905
+ """
906
+
907
+
908
+ @add_start_docstrings(
909
+ "The bare StableLm Model outputting raw hidden-states without any specific head on top.",
910
+ STABLELM_START_DOCSTRING,
911
+ )
912
+ class StableLmModel(StableLmPreTrainedModel):
913
+ """
914
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`StableLmDecoderLayer`]
915
+
916
+ Args:
917
+ config: StableLmConfig
918
+ """
919
+
920
+ def __init__(self, config: StableLmConfig):
921
+ super().__init__(config)
922
+ self.padding_idx = config.pad_token_id
923
+ self.vocab_size = config.vocab_size
924
+
925
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
926
+ self.layers = nn.ModuleList(
927
+ [StableLmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
928
+ )
929
+ self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
930
+
931
+ self._attn_implementation = config._attn_implementation
932
+ self.gradient_checkpointing = False
933
+ # Initialize weights and apply final processing
934
+ self.post_init()
935
+
936
+ def get_input_embeddings(self):
937
+ return self.embed_tokens
938
+
939
+ def set_input_embeddings(self, value):
940
+ self.embed_tokens = value
941
+
942
+ @add_start_docstrings_to_model_forward(STABLELM_INPUTS_DOCSTRING)
943
+ def forward(
944
+ self,
945
+ input_ids: torch.LongTensor = None,
946
+ attention_mask: Optional[torch.Tensor] = None,
947
+ position_ids: Optional[torch.LongTensor] = None,
948
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
949
+ inputs_embeds: Optional[torch.FloatTensor] = None,
950
+ use_cache: Optional[bool] = None,
951
+ output_attentions: Optional[bool] = None,
952
+ output_hidden_states: Optional[bool] = None,
953
+ return_dict: Optional[bool] = None,
954
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
955
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
956
+ output_hidden_states = (
957
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
958
+ )
959
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
960
+
961
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
962
+
963
+ # retrieve input_ids and inputs_embeds
964
+ if input_ids is not None and inputs_embeds is not None:
965
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
966
+ elif input_ids is not None:
967
+ batch_size, seq_length = input_ids.shape
968
+ elif inputs_embeds is not None:
969
+ batch_size, seq_length, _ = inputs_embeds.shape
970
+ else:
971
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
972
+
973
+ seq_length_with_past = seq_length
974
+ past_key_values_length = 0
975
+
976
+ if self.gradient_checkpointing and self.training:
977
+ if use_cache:
978
+ logger.warning_once(
979
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
980
+ )
981
+ use_cache = False
982
+
983
+ if use_cache:
984
+ use_legacy_cache = not isinstance(past_key_values, Cache)
985
+ if use_legacy_cache:
986
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
987
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
988
+ seq_length_with_past = seq_length_with_past + past_key_values_length
989
+
990
+ if position_ids is None:
991
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
992
+ position_ids = torch.arange(
993
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
994
+ )
995
+ position_ids = position_ids.unsqueeze(0)
996
+
997
+ if inputs_embeds is None:
998
+ inputs_embeds = self.embed_tokens(input_ids)
999
+ # embed positions
1000
+ if self._attn_implementation == "flash_attention_2":
1001
+ # 2d mask is passed through the layers
1002
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1003
+ # when output_attentions is requested, fall back to the eager attention implementation
1004
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1005
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1006
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1007
+ )
1008
+ else:
1009
+ # 4d mask is passed through the layers
1010
+ attention_mask = _prepare_4d_causal_attention_mask(
1011
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1012
+ )
1013
+
1014
+ hidden_states = inputs_embeds
1015
+
1016
+ # decoder layers
1017
+ all_hidden_states = () if output_hidden_states else None
1018
+ all_self_attns = () if output_attentions else None
1019
+ next_decoder_cache = None
1020
+
1021
+ for decoder_layer in self.layers:
1022
+ if output_hidden_states:
1023
+ all_hidden_states += (hidden_states,)
1024
+
1025
+ if self.gradient_checkpointing and self.training:
1026
+ layer_outputs = self._gradient_checkpointing_func(
1027
+ decoder_layer.__call__,
1028
+ hidden_states,
1029
+ attention_mask,
1030
+ position_ids,
1031
+ past_key_values,
1032
+ output_attentions,
1033
+ )
1034
+ else:
1035
+ layer_outputs = decoder_layer(
1036
+ hidden_states,
1037
+ attention_mask=attention_mask,
1038
+ position_ids=position_ids,
1039
+ past_key_value=past_key_values,
1040
+ output_attentions=output_attentions,
1041
+ use_cache=use_cache,
1042
+ )
1043
+
1044
+ hidden_states = layer_outputs[0]
1045
+
1046
+ if use_cache:
1047
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1048
+
1049
+ if output_attentions:
1050
+ all_self_attns += (layer_outputs[1],)
1051
+
1052
+ hidden_states = self.norm(hidden_states)
1053
+
1054
+ # add hidden states from the last decoder layer
1055
+ if output_hidden_states:
1056
+ all_hidden_states += (hidden_states,)
1057
+
1058
+ next_cache = None
1059
+ if use_cache:
1060
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1061
+
1062
+ if not return_dict:
1063
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1064
+ return BaseModelOutputWithPast(
1065
+ last_hidden_state=hidden_states,
1066
+ past_key_values=next_cache,
1067
+ hidden_states=all_hidden_states,
1068
+ attentions=all_self_attns,
1069
+ )
1070
+
1071
+
1072
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM with PERSIMMON->STABLELM,Persimmon->StableLm
1073
+ class StableLmForCausalLM(StableLmPreTrainedModel):
1074
+ _tied_weights_keys = ["lm_head.weight"]
1075
+
1076
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->STABLELM,Llama->StableLm
1077
+ def __init__(self, config):
1078
+ super().__init__(config)
1079
+ self.model = StableLmModel(config)
1080
+ self.vocab_size = config.vocab_size
1081
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1082
+
1083
+ # Initialize weights and apply final processing
1084
+ self.post_init()
1085
+
1086
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1087
+ def get_input_embeddings(self):
1088
+ return self.model.embed_tokens
1089
+
1090
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1091
+ def set_input_embeddings(self, value):
1092
+ self.model.embed_tokens = value
1093
+
1094
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1095
+ def get_output_embeddings(self):
1096
+ return self.lm_head
1097
+
1098
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1099
+ def set_output_embeddings(self, new_embeddings):
1100
+ self.lm_head = new_embeddings
1101
+
1102
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1103
+ def set_decoder(self, decoder):
1104
+ self.model = decoder
1105
+
1106
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1107
+ def get_decoder(self):
1108
+ return self.model
1109
+
1110
+ @add_start_docstrings_to_model_forward(STABLELM_INPUTS_DOCSTRING)
1111
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1112
+ # Ignore copy
1113
+ def forward(
1114
+ self,
1115
+ input_ids: torch.LongTensor = None,
1116
+ attention_mask: Optional[torch.Tensor] = None,
1117
+ position_ids: Optional[torch.LongTensor] = None,
1118
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1119
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1120
+ labels: Optional[torch.LongTensor] = None,
1121
+ use_cache: Optional[bool] = None,
1122
+ output_attentions: Optional[bool] = None,
1123
+ output_hidden_states: Optional[bool] = None,
1124
+ return_dict: Optional[bool] = None,
1125
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1126
+ r"""
1127
+ Args:
1128
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1129
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1130
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1131
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1132
+
1133
+ Returns:
1134
+
1135
+ Example:
1136
+
1137
+ ```python
1138
+ >>> from transformers import AutoTokenizer, StableLmForCausalLM
1139
+
1140
+ >>> model = StableLmForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t")
1141
+ >>> tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
1142
+
1143
+ >>> prompt = "The weather is always wonderful in"
1144
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1145
+
1146
+ >>> # Generate
1147
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1148
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1149
+ 'The weather is always wonderful in the summer in the city of San Diego. The city is located on the coast of the Pacific Ocean and is surrounded by'
1150
+ ```"""
1151
+
1152
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1153
+ output_hidden_states = (
1154
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1155
+ )
1156
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1157
+
1158
+ outputs = self.model(
1159
+ input_ids=input_ids,
1160
+ attention_mask=attention_mask,
1161
+ position_ids=position_ids,
1162
+ past_key_values=past_key_values,
1163
+ inputs_embeds=inputs_embeds,
1164
+ use_cache=use_cache,
1165
+ output_attentions=output_attentions,
1166
+ output_hidden_states=output_hidden_states,
1167
+ return_dict=return_dict,
1168
+ )
1169
+
1170
+ hidden_states = outputs[0]
1171
+ logits = self.lm_head(hidden_states)
1172
+
1173
+ loss = None
1174
+ if labels is not None:
1175
+ # Shift so that tokens < n predict n
1176
+ shift_logits = logits[..., :-1, :].contiguous()
1177
+ shift_labels = labels[..., 1:].contiguous()
1178
+ # Flatten the tokens
1179
+ loss_fct = CrossEntropyLoss()
1180
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1181
+ shift_labels = shift_labels.view(-1)
1182
+ # Enable model parallelism
1183
+ shift_labels = shift_labels.to(shift_logits.device)
1184
+ loss = loss_fct(shift_logits, shift_labels)
1185
+
1186
+ if not return_dict:
1187
+ output = (logits,) + outputs[1:]
1188
+ return (loss,) + output if loss is not None else output
1189
+
1190
+ return CausalLMOutputWithPast(
1191
+ loss=loss,
1192
+ logits=logits,
1193
+ past_key_values=outputs.past_key_values,
1194
+ hidden_states=outputs.hidden_states,
1195
+ attentions=outputs.attentions,
1196
+ )
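The label shifting performed in the loss computation above ("tokens `< n` predict `n`") can be seen on a toy example; the tensors here are invented purely for illustration.

```python
import torch

labels = torch.tensor([[10, 11, 12, 13]])  # (batch, seq_len)
logits = torch.randn(1, 4, 50)             # (batch, seq_len, vocab_size)

shift_logits = logits[..., :-1, :]  # predictions made at positions 0..2
shift_labels = labels[..., 1:]      # targets are the *next* tokens: 11, 12, 13

loss = torch.nn.functional.cross_entropy(
    shift_logits.reshape(-1, 50), shift_labels.reshape(-1)
)
```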
1197
+
1198
+ def prepare_inputs_for_generation(
1199
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1200
+ ):
1201
+ if past_key_values is not None:
1202
+ if isinstance(past_key_values, Cache):
1203
+ cache_length = past_key_values.get_seq_length()
1204
+ past_length = past_key_values.seen_tokens
1205
+ max_cache_length = past_key_values.get_max_length()
1206
+ else:
1207
+ cache_length = past_length = past_key_values[0][0].shape[2]
1208
+ max_cache_length = None
1209
+
1210
+ # Keep only the unprocessed tokens:
1211
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1212
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1213
+ # input)
1214
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1215
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1216
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1217
+ # input_ids based on the past_length.
1218
+ elif past_length < input_ids.shape[1]:
1219
+ input_ids = input_ids[:, past_length:]
1220
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1221
+
1222
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1223
+ if (
1224
+ max_cache_length is not None
1225
+ and attention_mask is not None
1226
+ and cache_length + input_ids.shape[1] > max_cache_length
1227
+ ):
1228
+ attention_mask = attention_mask[:, -max_cache_length:]
1229
+
1230
+ position_ids = kwargs.get("position_ids", None)
1231
+ if attention_mask is not None and position_ids is None:
1232
+ # create position_ids on the fly for batch generation
1233
+ position_ids = attention_mask.long().cumsum(-1) - 1
1234
+ position_ids.masked_fill_(attention_mask == 0, 1)
1235
+ if past_key_values:
1236
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1237
+
1238
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1239
+ if inputs_embeds is not None and past_key_values is None:
1240
+ model_inputs = {"inputs_embeds": inputs_embeds}
1241
+ else:
1242
+ model_inputs = {"input_ids": input_ids}
1243
+
1244
+ model_inputs.update(
1245
+ {
1246
+ "position_ids": position_ids,
1247
+ "past_key_values": past_key_values,
1248
+ "use_cache": kwargs.get("use_cache"),
1249
+ "attention_mask": attention_mask,
1250
+ }
1251
+ )
1252
+ return model_inputs
1253
+
1254
+ @staticmethod
1255
+ def _reorder_cache(past_key_values, beam_idx):
1256
+ reordered_past = ()
1257
+ for layer_past in past_key_values:
1258
+ reordered_past += (
1259
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1260
+ )
1261
+ return reordered_past
1262
+
1263
+
1264
+ @add_start_docstrings(
1265
+ """
1266
+ The StableLm transformer with a sequence classification head on top (linear layer).
1267
+
1268
+ [`StableLmForSequenceClassification`] uses the last token in order to do the classification, as other causal
1269
+ models (e.g. GPT-2) do.
1270
+
1271
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1272
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1273
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1274
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1275
+ each row of the batch).
1276
+ """,
1277
+ STABLELM_START_DOCSTRING,
1278
+ )
1279
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->STABLELM,Llama->StableLm
1280
+ class StableLmForSequenceClassification(StableLmPreTrainedModel):
1281
+ def __init__(self, config):
1282
+ super().__init__(config)
1283
+ self.num_labels = config.num_labels
1284
+ self.model = StableLmModel(config)
1285
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1286
+
1287
+ # Initialize weights and apply final processing
1288
+ self.post_init()
1289
+
1290
+ def get_input_embeddings(self):
1291
+ return self.model.embed_tokens
1292
+
1293
+ def set_input_embeddings(self, value):
1294
+ self.model.embed_tokens = value
1295
+
1296
+ @add_start_docstrings_to_model_forward(STABLELM_INPUTS_DOCSTRING)
1297
+ def forward(
1298
+ self,
1299
+ input_ids: torch.LongTensor = None,
1300
+ attention_mask: Optional[torch.Tensor] = None,
1301
+ position_ids: Optional[torch.LongTensor] = None,
1302
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1303
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1304
+ labels: Optional[torch.LongTensor] = None,
1305
+ use_cache: Optional[bool] = None,
1306
+ output_attentions: Optional[bool] = None,
1307
+ output_hidden_states: Optional[bool] = None,
1308
+ return_dict: Optional[bool] = None,
1309
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1310
+ r"""
1311
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1312
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1313
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1314
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1315
+ """
1316
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1317
+
1318
+ transformer_outputs = self.model(
1319
+ input_ids,
1320
+ attention_mask=attention_mask,
1321
+ position_ids=position_ids,
1322
+ past_key_values=past_key_values,
1323
+ inputs_embeds=inputs_embeds,
1324
+ use_cache=use_cache,
1325
+ output_attentions=output_attentions,
1326
+ output_hidden_states=output_hidden_states,
1327
+ return_dict=return_dict,
1328
+ )
1329
+ hidden_states = transformer_outputs[0]
1330
+ logits = self.score(hidden_states)
1331
+
1332
+ if input_ids is not None:
1333
+ batch_size = input_ids.shape[0]
1334
+ else:
1335
+ batch_size = inputs_embeds.shape[0]
1336
+
1337
+ if self.config.pad_token_id is None and batch_size != 1:
1338
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1339
+ if self.config.pad_token_id is None:
1340
+ sequence_lengths = -1
1341
+ else:
1342
+ if input_ids is not None:
1343
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1344
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1345
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1346
+ sequence_lengths = sequence_lengths.to(logits.device)
1347
+ else:
1348
+ sequence_lengths = -1
1349
+
1350
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1351
+
1352
+ loss = None
1353
+ if labels is not None:
1354
+ labels = labels.to(logits.device)
1355
+ if self.config.problem_type is None:
1356
+ if self.num_labels == 1:
1357
+ self.config.problem_type = "regression"
1358
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1359
+ self.config.problem_type = "single_label_classification"
1360
+ else:
1361
+ self.config.problem_type = "multi_label_classification"
1362
+
1363
+ if self.config.problem_type == "regression":
1364
+ loss_fct = MSELoss()
1365
+ if self.num_labels == 1:
1366
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1367
+ else:
1368
+ loss = loss_fct(pooled_logits, labels)
1369
+ elif self.config.problem_type == "single_label_classification":
1370
+ loss_fct = CrossEntropyLoss()
1371
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1372
+ elif self.config.problem_type == "multi_label_classification":
1373
+ loss_fct = BCEWithLogitsLoss()
1374
+ loss = loss_fct(pooled_logits, labels)
1375
+ if not return_dict:
1376
+ output = (pooled_logits,) + transformer_outputs[1:]
1377
+ return ((loss,) + output) if loss is not None else output
1378
+
1379
+ return SequenceClassifierOutputWithPast(
1380
+ loss=loss,
1381
+ logits=pooled_logits,
1382
+ past_key_values=transformer_outputs.past_key_values,
1383
+ hidden_states=transformer_outputs.hidden_states,
1384
+ attentions=transformer_outputs.attentions,
1385
+ )
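The last-non-padding-token pooling used by `StableLmForSequenceClassification` above relies on an argmax-over-pad-positions trick that also works when a row contains no padding at all. A minimal sketch (editor's example with arbitrary ids and `pad_token_id = 0`):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])

# Index of the first pad token minus one == index of the last real token.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # rows with no padding wrap to the last index
print(sequence_lengths.tolist())  # [2, 1]

logits = torch.randn(2, 5, 3)  # (batch, seq_len, num_labels)
pooled_logits = logits[torch.arange(2), sequence_lengths]  # one logit row per sequence
```

The modulo keeps the index at the last position when `argmax` returns 0 because no pad token was found in the row.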
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_visual_bert": ["VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VisualBertConfig"]}
20
+
21
+ try:
22
+ if not is_torch_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["modeling_visual_bert"] = [
28
+ "VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
29
+ "VisualBertForMultipleChoice",
30
+ "VisualBertForPreTraining",
31
+ "VisualBertForQuestionAnswering",
32
+ "VisualBertForRegionToPhraseAlignment",
33
+ "VisualBertForVisualReasoning",
34
+ "VisualBertLayer",
35
+ "VisualBertModel",
36
+ "VisualBertPreTrainedModel",
37
+ ]
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from .configuration_visual_bert import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, VisualBertConfig
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ from .modeling_visual_bert import (
50
+ VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
51
+ VisualBertForMultipleChoice,
52
+ VisualBertForPreTraining,
53
+ VisualBertForQuestionAnswering,
54
+ VisualBertForRegionToPhraseAlignment,
55
+ VisualBertForVisualReasoning,
56
+ VisualBertLayer,
57
+ VisualBertModel,
58
+ VisualBertPreTrainedModel,
59
+ )
60
+
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
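Because of the `_LazyModule` indirection above, the heavy `modeling_visual_bert` module is only imported when one of its names is actually accessed. A hedged usage sketch (it instantiates a randomly initialized model, so `torch` must be installed):

```python
from transformers import VisualBertConfig, VisualBertModel

config = VisualBertConfig()      # defaults mirror the uclanlp/visualbert-vqa-coco-pre architecture
model = VisualBertModel(config)  # randomly initialized weights
print(config.visual_embedding_dim)  # 512
```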
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/configuration_visual_bert.cpython-310.pyc ADDED
Binary file (6.05 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (3.27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/__pycache__/modeling_visual_bert.cpython-310.pyc ADDED
Binary file (46.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/configuration_visual_bert.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ VisualBERT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class VisualBertConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`VisualBertModel`]. It is used to instantiate an
30
+ VisualBERT model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the VisualBERT
32
+ [uclanlp/visualbert-vqa-coco-pre](https://huggingface.co/uclanlp/visualbert-vqa-coco-pre) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 30522):
40
+ Vocabulary size of the VisualBERT model. Defines the number of different tokens that can be represented by
41
+ the `inputs_ids` passed when calling the forward method of [`VisualBertModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ visual_embedding_dim (`int`, *optional*, defaults to 512):
47
+ Dimensionality of the visual embeddings to be passed to the model.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`VisualBertModel`].
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
69
+ The epsilon used by the layer normalization layers.
70
+ bypass_transformer (`bool`, *optional*, defaults to `False`):
71
+ Whether or not the model should bypass the transformer for the visual embeddings. If set to `True`, the
72
+ model directly concatenates the visual embeddings from [`VisualBertEmbeddings`] with the text output from the
73
+ transformer, and then passes them to a self-attention layer.
74
+ special_visual_initialize (`bool`, *optional*, defaults to `True`):
75
+ Whether or not the visual token type and position type embedding weights should be initialized the same as
76
+ the textual token type and position type embeddings. When set to `True`, the weights of the textual token
77
+ type and position type embeddings are copied to the respective visual embedding layers.
78
+
79
+
80
+ Example:
81
+
82
+ ```python
83
+ >>> from transformers import VisualBertConfig, VisualBertModel
84
+
85
+ >>> # Initializing a VisualBERT visualbert-vqa-coco-pre style configuration
86
+ >>> configuration = VisualBertConfig.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
87
+
88
+ >>> # Initializing a model (with random weights) from the visualbert-vqa-coco-pre style configuration
89
+ >>> model = VisualBertModel(configuration)
90
+
91
+ >>> # Accessing the model configuration
92
+ >>> configuration = model.config
93
+ ```"""
94
+
95
+ model_type = "visual_bert"
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_size=30522,
100
+ hidden_size=768,
101
+ visual_embedding_dim=512,
102
+ num_hidden_layers=12,
103
+ num_attention_heads=12,
104
+ intermediate_size=3072,
105
+ hidden_act="gelu",
106
+ hidden_dropout_prob=0.1,
107
+ attention_probs_dropout_prob=0.1,
108
+ max_position_embeddings=512,
109
+ type_vocab_size=2,
110
+ initializer_range=0.02,
111
+ layer_norm_eps=1e-12,
112
+ bypass_transformer=False,
113
+ special_visual_initialize=True,
114
+ pad_token_id=1,
115
+ bos_token_id=0,
116
+ eos_token_id=2,
117
+ **kwargs,
118
+ ):
119
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
120
+
121
+ self.vocab_size = vocab_size
122
+ self.max_position_embeddings = max_position_embeddings
123
+ self.hidden_size = hidden_size
124
+ self.visual_embedding_dim = visual_embedding_dim
125
+ self.num_hidden_layers = num_hidden_layers
126
+ self.num_attention_heads = num_attention_heads
127
+ self.intermediate_size = intermediate_size
128
+ self.hidden_act = hidden_act
129
+ self.hidden_dropout_prob = hidden_dropout_prob
130
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
131
+ self.initializer_range = initializer_range
132
+ self.type_vocab_size = type_vocab_size
133
+ self.layer_norm_eps = layer_norm_eps
134
+ self.bypass_transformer = bypass_transformer
135
+ self.special_visual_initialize = special_visual_initialize
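All of the parameters documented above map one-to-one onto keyword arguments of `VisualBertConfig`. As a sketch, the values below mirror the VQA configuration used by the conversion script later in this commit (2048-dimensional visual features, 3129 answer labels); they are shown only as an example.

```python
from transformers import VisualBertConfig, VisualBertForQuestionAnswering

# VQA-style settings, for illustration only.
config = VisualBertConfig(visual_embedding_dim=2048, num_labels=3129)
model = VisualBertForQuestionAnswering(config)
print(config.visual_embedding_dim, config.num_labels)  # 2048 3129
```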
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,150 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert VisualBert checkpoint."""
16
+
17
+
18
+ import argparse
19
+ from collections import OrderedDict
20
+ from pathlib import Path
21
+
22
+ import torch
23
+
24
+ from transformers import (
25
+ VisualBertConfig,
26
+ VisualBertForMultipleChoice,
27
+ VisualBertForPreTraining,
28
+ VisualBertForQuestionAnswering,
29
+ VisualBertForVisualReasoning,
30
+ )
31
+ from transformers.utils import logging
32
+
33
+
34
+ logging.set_verbosity_info()
35
+ logger = logging.get_logger(__name__)
36
+
37
+ rename_keys_prefix = [
38
+ ("bert.bert", "visual_bert"),
39
+ ("bert.cls", "cls"),
40
+ ("bert.classifier", "cls"),
41
+ ("token_type_embeddings_visual", "visual_token_type_embeddings"),
42
+ ("position_embeddings_visual", "visual_position_embeddings"),
43
+ ("projection", "visual_projection"),
44
+ ]
45
+
46
+ ACCEPTABLE_CHECKPOINTS = [
47
+ "nlvr2_coco_pre_trained.th",
48
+ "nlvr2_fine_tuned.th",
49
+ "nlvr2_pre_trained.th",
50
+ "vcr_coco_pre_train.th",
51
+ "vcr_fine_tune.th",
52
+ "vcr_pre_train.th",
53
+ "vqa_coco_pre_trained.th",
54
+ "vqa_fine_tuned.th",
55
+ "vqa_pre_trained.th",
56
+ ]
57
+
58
+
59
+ def load_state_dict(checkpoint_path):
60
+ sd = torch.load(checkpoint_path, map_location="cpu")
61
+ return sd
62
+
63
+
64
+ def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
65
+ new_d = OrderedDict()
66
+ new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
67
+ # detector_d = OrderedDict()
68
+ for key in d:
69
+ if "detector" in key:
70
+ # detector_d[key.replace('detector.','')] = d[key]
71
+ continue
72
+ new_key = key
73
+ for name_pair in rename_keys_prefix:
74
+ new_key = new_key.replace(name_pair[0], name_pair[1])
75
+ new_d[new_key] = d[key]
76
+ if key == "bert.cls.predictions.decoder.weight":
77
+ # Old bert code didn't have `decoder.bias`, but was added separately
78
+ new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
79
+ return new_d
80
+
81
+
82
+ @torch.no_grad()
83
+ def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
84
+ """
85
+ Copy/paste/tweak model's weights to our VisualBERT structure.
86
+ """
87
+
88
+ assert (
89
+ checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
90
+ ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
91
+
92
+ # Get Config
93
+ if "pre" in checkpoint_path:
94
+ model_type = "pretraining"
95
+ if "vcr" in checkpoint_path:
96
+ config_params = {"visual_embedding_dim": 512}
97
+ elif "vqa_advanced" in checkpoint_path:
98
+ config_params = {"visual_embedding_dim": 2048}
99
+ elif "vqa" in checkpoint_path:
100
+ config_params = {"visual_embedding_dim": 2048}
101
+ elif "nlvr" in checkpoint_path:
102
+ config_params = {"visual_embedding_dim": 1024}
103
+ else:
104
+ raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
105
+ else:
106
+ if "vcr" in checkpoint_path:
107
+ config_params = {"visual_embedding_dim": 512}
108
+ model_type = "multichoice"
109
+ elif "vqa_advanced" in checkpoint_path:
110
+ config_params = {"visual_embedding_dim": 2048}
111
+ model_type = "vqa_advanced"
112
+ elif "vqa" in checkpoint_path:
113
+ config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
114
+ model_type = "vqa"
115
+ elif "nlvr" in checkpoint_path:
116
+ config_params = {
117
+ "visual_embedding_dim": 1024,
118
+ "num_labels": 2,
119
+ }
120
+ model_type = "nlvr"
121
+
122
+ config = VisualBertConfig(**config_params)
123
+
124
+ # Load State Dict
125
+ state_dict = load_state_dict(checkpoint_path)
126
+
127
+ new_state_dict = get_new_dict(state_dict, config)
128
+
129
+ if model_type == "pretraining":
130
+ model = VisualBertForPreTraining(config)
131
+ elif model_type == "vqa":
132
+ model = VisualBertForQuestionAnswering(config)
133
+ elif model_type == "nlvr":
134
+ model = VisualBertForVisualReasoning(config)
135
+ elif model_type == "multichoice":
136
+ model = VisualBertForMultipleChoice(config)
137
+
138
+ model.load_state_dict(new_state_dict)
139
+ # Save Checkpoints
140
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
141
+ model.save_pretrained(pytorch_dump_folder_path)
142
+
143
+
144
+ if __name__ == "__main__":
145
+ parser = argparse.ArgumentParser()
146
+ # Required parameters
147
+ parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
148
+ parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
149
+ args = parser.parse_args()
150
+ convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
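The script is meant to be run from the command line with the two positional arguments defined above, but it can also be called directly from Python as sketched below. The checkpoint path is a placeholder and must point to one of the files listed in `ACCEPTABLE_CHECKPOINTS`.

```python
# Illustrative call only: "./vqa_pre_trained.th" is a placeholder path.
from transformers.models.visual_bert.convert_visual_bert_original_pytorch_checkpoint_to_pytorch import (
    convert_visual_bert_checkpoint,
)

convert_visual_bert_checkpoint("./vqa_pre_trained.th", "./visualbert-vqa-pretrained")
```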
llmeval-env/lib/python3.10/site-packages/transformers/models/visual_bert/modeling_visual_bert.py ADDED
@@ -0,0 +1,1590 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The UCLA NLP Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch VisualBERT model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss, KLDivLoss, LogSoftmax
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ BaseModelOutputWithPooling,
31
+ MultipleChoiceModelOutput,
32
+ SequenceClassifierOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_visual_bert import VisualBertConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CONFIG_FOR_DOC = "VisualBertConfig"
49
+ _CHECKPOINT_FOR_DOC = "uclanlp/visualbert-vqa-coco-pre"
50
+
51
+
52
+ from ..deprecated._archive_maps import VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ class VisualBertEmbeddings(nn.Module):
56
+ """Construct the embeddings from word, position and token_type embeddings and visual embeddings."""
57
+
58
+ def __init__(self, config):
59
+ super().__init__()
60
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
61
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
62
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
63
+
64
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
65
+ # any TensorFlow checkpoint file
66
+
67
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
68
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
69
+
70
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
71
+ self.register_buffer(
72
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
73
+ )
74
+
75
+ # For Visual Features
76
+ # Token type and position embedding for image features
77
+ self.visual_token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
78
+ self.visual_position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
79
+
80
+ if config.special_visual_initialize:
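+ # Initialize the visual token type / position embeddings from their text counterparts so visual
+ # tokens start in the same embedding space as text tokens (the cloned weights remain trainable).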
81
+ self.visual_token_type_embeddings.weight.data = nn.Parameter(
82
+ self.token_type_embeddings.weight.data.clone(), requires_grad=True
83
+ )
84
+ self.visual_position_embeddings.weight.data = nn.Parameter(
85
+ self.position_embeddings.weight.data.clone(), requires_grad=True
86
+ )
87
+
88
+ self.visual_projection = nn.Linear(config.visual_embedding_dim, config.hidden_size)
89
+
90
+ def forward(
91
+ self,
92
+ input_ids=None,
93
+ token_type_ids=None,
94
+ position_ids=None,
95
+ inputs_embeds=None,
96
+ visual_embeds=None,
97
+ visual_token_type_ids=None,
98
+ image_text_alignment=None,
99
+ ):
100
+ if input_ids is not None:
101
+ input_shape = input_ids.size()
102
+ else:
103
+ input_shape = inputs_embeds.size()[:-1]
104
+
105
+ seq_length = input_shape[1]
106
+
107
+ if position_ids is None:
108
+ position_ids = self.position_ids[:, :seq_length]
109
+
110
+ if inputs_embeds is None:
111
+ inputs_embeds = self.word_embeddings(input_ids)
112
+
113
+ if token_type_ids is None:
114
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
115
+
116
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
117
+
118
+ embeddings = inputs_embeds + token_type_embeddings
119
+
120
+ # Absolute Position Embeddings
121
+ position_embeddings = self.position_embeddings(position_ids)
122
+ embeddings += position_embeddings
123
+
124
+ if visual_embeds is not None:
125
+ if visual_token_type_ids is None:
126
+ visual_token_type_ids = torch.ones(
127
+ visual_embeds.size()[:-1], dtype=torch.long, device=self.position_ids.device
128
+ )
129
+
130
+ visual_embeds = self.visual_projection(visual_embeds)
131
+ visual_token_type_embeddings = self.visual_token_type_embeddings(visual_token_type_ids)
132
+
133
+ if image_text_alignment is not None:
134
+ # image_text_alignment = Batch x image_length x alignment_number.
135
+ # Each element denotes the position of the word corresponding to the image feature. -1 is the padding value.
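+ # Illustrative (hypothetical) example: with 2 image regions each aligned to at most 2 words,
+ # image_text_alignment = [[[3, 5], [7, -1]]] means region 0 is aligned to words 3 and 5,
+ # while region 1 is aligned to word 7 only (the -1 entry is padding).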
136
+
137
+ dtype = token_type_embeddings.dtype
138
+ image_text_alignment_mask = (image_text_alignment != -1).long()
139
+ # Get rid of the -1.
140
+ image_text_alignment = image_text_alignment_mask * image_text_alignment
141
+
142
+ # Batch x image_length x alignment length x dim
143
+ visual_position_embeddings = self.position_embeddings(image_text_alignment)
144
+ visual_position_embeddings *= image_text_alignment_mask.to(dtype=dtype).unsqueeze(-1)
145
+ visual_position_embeddings = visual_position_embeddings.sum(2)
146
+
147
+ # We want to average along the alignment_number dimension.
148
+ image_text_alignment_mask = image_text_alignment_mask.to(dtype=dtype).sum(2)
149
+
150
+ if (image_text_alignment_mask == 0).sum() != 0:
151
+ image_text_alignment_mask[image_text_alignment_mask == 0] = 1 # Avoid divide by zero error
152
+ logger.warning(
153
+ "Found 0 values in `image_text_alignment_mask`. Setting them to 1 to avoid divide-by-zero"
154
+ " error."
155
+ )
156
+ visual_position_embeddings = visual_position_embeddings / image_text_alignment_mask.unsqueeze(-1)
157
+
158
+ visual_position_ids = torch.zeros(
159
+ *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device
160
+ )
161
+
162
+ # When fine-tuning the detector, the image_text_alignment is sometimes padded too long.
163
+ if visual_position_embeddings.size(1) != visual_embeds.size(1):
164
+ if visual_position_embeddings.size(1) < visual_embeds.size(1):
165
+ raise ValueError(
166
+ f"Visual position embeddings length: {visual_position_embeddings.size(1)} "
167
+ f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}"
168
+ )
169
+ visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :]
170
+
171
+ visual_position_embeddings = visual_position_embeddings + self.visual_position_embeddings(
172
+ visual_position_ids
173
+ )
174
+ else:
175
+ visual_position_ids = torch.zeros(
176
+ *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device
177
+ )
178
+ visual_position_embeddings = self.visual_position_embeddings(visual_position_ids)
179
+
180
+ visual_embeddings = visual_embeds + visual_position_embeddings + visual_token_type_embeddings
181
+
182
+ embeddings = torch.cat((embeddings, visual_embeddings), dim=1)
183
+
184
+ embeddings = self.LayerNorm(embeddings)
185
+ embeddings = self.dropout(embeddings)
186
+ return embeddings
187
+
188
+
189
+ class VisualBertSelfAttention(nn.Module):
190
+ def __init__(self, config):
191
+ super().__init__()
192
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
193
+ raise ValueError(
194
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
195
+ f"heads ({config.num_attention_heads})"
196
+ )
197
+
198
+ self.num_attention_heads = config.num_attention_heads
199
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
200
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
201
+
202
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
203
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
204
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
205
+
206
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
207
+
208
+ def transpose_for_scores(self, x):
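+ # Reshape (batch_size, seq_length, all_head_size) -> (batch_size, num_heads, seq_length, head_size)
+ # so attention can be computed independently per head.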
209
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
210
+ x = x.view(*new_x_shape)
211
+ return x.permute(0, 2, 1, 3)
212
+
213
+ def forward(
214
+ self,
215
+ hidden_states,
216
+ attention_mask=None,
217
+ head_mask=None,
218
+ output_attentions=False,
219
+ ):
220
+ mixed_query_layer = self.query(hidden_states)
221
+
222
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
223
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
224
+
225
+ query_layer = self.transpose_for_scores(mixed_query_layer)
226
+
227
+ # Take the dot product between "query" and "key" to get the raw attention scores.
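+ # query_layer/key_layer have shape (batch_size, num_heads, seq_length, head_size), so the resulting
+ # attention_scores tensor has shape (batch_size, num_heads, seq_length, seq_length).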
228
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
229
+
230
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
231
+ if attention_mask is not None:
232
+ # Apply the attention mask (precomputed for all layers in the VisualBertModel forward() function)
233
+ attention_scores = attention_scores + attention_mask
234
+
235
+ # Normalize the attention scores to probabilities.
236
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
237
+
238
+ # This is actually dropping out entire tokens to attend to, which might
239
+ # seem a bit unusual, but is taken from the original Transformer paper.
240
+ attention_probs = self.dropout(attention_probs)
241
+
242
+ # Mask heads if we want to
243
+ if head_mask is not None:
244
+ attention_probs = attention_probs * head_mask
245
+
246
+ context_layer = torch.matmul(attention_probs, value_layer)
247
+
248
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
249
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
250
+ context_layer = context_layer.view(*new_context_layer_shape)
251
+
252
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
253
+
254
+ return outputs
255
+
256
+
257
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->VisualBert
258
+ class VisualBertSelfOutput(nn.Module):
259
+ def __init__(self, config):
260
+ super().__init__()
261
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
262
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
263
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
264
+
265
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
266
+ hidden_states = self.dense(hidden_states)
267
+ hidden_states = self.dropout(hidden_states)
268
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
269
+ return hidden_states
270
+
271
+
272
+ class VisualBertAttention(nn.Module):
273
+ def __init__(self, config):
274
+ super().__init__()
275
+ self.self = VisualBertSelfAttention(config)
276
+ self.output = VisualBertSelfOutput(config)
277
+ self.pruned_heads = set()
278
+
279
+ def prune_heads(self, heads):
280
+ if len(heads) == 0:
281
+ return
282
+ heads, index = find_pruneable_heads_and_indices(
283
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
284
+ )
285
+
286
+ # Prune linear layers
287
+ self.self.query = prune_linear_layer(self.self.query, index)
288
+ self.self.key = prune_linear_layer(self.self.key, index)
289
+ self.self.value = prune_linear_layer(self.self.value, index)
290
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
291
+
292
+ # Update hyper params and store pruned heads
293
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
294
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
295
+ self.pruned_heads = self.pruned_heads.union(heads)
296
+
297
+ def forward(
298
+ self,
299
+ hidden_states,
300
+ attention_mask=None,
301
+ head_mask=None,
302
+ output_attentions=False,
303
+ ):
304
+ self_outputs = self.self(
305
+ hidden_states,
306
+ attention_mask,
307
+ head_mask,
308
+ output_attentions,
309
+ )
310
+ attention_output = self.output(self_outputs[0], hidden_states)
311
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
312
+ return outputs
313
+
314
+
315
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->VisualBert
316
+ class VisualBertIntermediate(nn.Module):
317
+ def __init__(self, config):
318
+ super().__init__()
319
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
320
+ if isinstance(config.hidden_act, str):
321
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
322
+ else:
323
+ self.intermediate_act_fn = config.hidden_act
324
+
325
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
326
+ hidden_states = self.dense(hidden_states)
327
+ hidden_states = self.intermediate_act_fn(hidden_states)
328
+ return hidden_states
329
+
330
+
331
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->VisualBert
332
+ class VisualBertOutput(nn.Module):
333
+ def __init__(self, config):
334
+ super().__init__()
335
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
336
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
337
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
338
+
339
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
340
+ hidden_states = self.dense(hidden_states)
341
+ hidden_states = self.dropout(hidden_states)
342
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
343
+ return hidden_states
344
+
345
+
346
+ class VisualBertLayer(nn.Module):
347
+ def __init__(self, config):
348
+ super().__init__()
349
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
350
+ self.seq_len_dim = 1
351
+ self.attention = VisualBertAttention(config)
352
+ self.intermediate = VisualBertIntermediate(config)
353
+ self.output = VisualBertOutput(config)
354
+
355
+ def forward(
356
+ self,
357
+ hidden_states,
358
+ attention_mask=None,
359
+ head_mask=None,
360
+ output_attentions=False,
361
+ ):
362
+ self_attention_outputs = self.attention(
363
+ hidden_states,
364
+ attention_mask,
365
+ head_mask,
366
+ output_attentions=output_attentions,
367
+ )
368
+ attention_output = self_attention_outputs[0]
369
+
370
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
371
+
372
+ layer_output = apply_chunking_to_forward(
373
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
374
+ )
375
+ outputs = (layer_output,) + outputs
376
+
377
+ return outputs
378
+
379
+ def feed_forward_chunk(self, attention_output):
380
+ intermediate_output = self.intermediate(attention_output)
381
+ layer_output = self.output(intermediate_output, attention_output)
382
+ return layer_output
383
+
384
+
385
+ class VisualBertEncoder(nn.Module):
386
+ def __init__(self, config):
387
+ super().__init__()
388
+ self.config = config
389
+ self.layer = nn.ModuleList([VisualBertLayer(config) for _ in range(config.num_hidden_layers)])
390
+ self.gradient_checkpointing = False
391
+
392
+ def forward(
393
+ self,
394
+ hidden_states,
395
+ attention_mask=None,
396
+ head_mask=None,
397
+ output_attentions=False,
398
+ output_hidden_states=False,
399
+ return_dict=True,
400
+ ):
401
+ all_hidden_states = () if output_hidden_states else None
402
+ all_self_attentions = () if output_attentions else None
403
+
404
+ for i, layer_module in enumerate(self.layer):
405
+ if output_hidden_states:
406
+ all_hidden_states = all_hidden_states + (hidden_states,)
407
+
408
+ layer_head_mask = head_mask[i] if head_mask is not None else None
409
+
410
+ if self.gradient_checkpointing and self.training:
411
+ layer_outputs = self._gradient_checkpointing_func(
412
+ layer_module.__call__,
413
+ hidden_states,
414
+ attention_mask,
415
+ layer_head_mask,
416
+ output_attentions,
417
+ )
418
+ else:
419
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
420
+
421
+ hidden_states = layer_outputs[0]
422
+ if output_attentions:
423
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
424
+
425
+ if output_hidden_states:
426
+ all_hidden_states = all_hidden_states + (hidden_states,)
427
+
428
+ if not return_dict:
429
+ return tuple(
430
+ v
431
+ for v in [
432
+ hidden_states,
433
+ all_hidden_states,
434
+ all_self_attentions,
435
+ ]
436
+ if v is not None
437
+ )
438
+ return BaseModelOutput(
439
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
440
+ )
441
+
442
+
443
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->VisualBert
444
+ class VisualBertPooler(nn.Module):
445
+ def __init__(self, config):
446
+ super().__init__()
447
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
448
+ self.activation = nn.Tanh()
449
+
450
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
451
+ # We "pool" the model by simply taking the hidden state corresponding
452
+ # to the first token.
453
+ first_token_tensor = hidden_states[:, 0]
454
+ pooled_output = self.dense(first_token_tensor)
455
+ pooled_output = self.activation(pooled_output)
456
+ return pooled_output
457
+
458
+
459
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->VisualBert
460
+ class VisualBertPredictionHeadTransform(nn.Module):
461
+ def __init__(self, config):
462
+ super().__init__()
463
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
464
+ if isinstance(config.hidden_act, str):
465
+ self.transform_act_fn = ACT2FN[config.hidden_act]
466
+ else:
467
+ self.transform_act_fn = config.hidden_act
468
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
469
+
470
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
471
+ hidden_states = self.dense(hidden_states)
472
+ hidden_states = self.transform_act_fn(hidden_states)
473
+ hidden_states = self.LayerNorm(hidden_states)
474
+ return hidden_states
475
+
476
+
477
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->VisualBert
478
+ class VisualBertLMPredictionHead(nn.Module):
479
+ def __init__(self, config):
480
+ super().__init__()
481
+ self.transform = VisualBertPredictionHeadTransform(config)
482
+
483
+ # The output weights are the same as the input embeddings, but there is
484
+ # an output-only bias for each token.
485
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
486
+
487
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
488
+
489
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
490
+ self.decoder.bias = self.bias
491
+
492
+ def forward(self, hidden_states):
493
+ hidden_states = self.transform(hidden_states)
494
+ hidden_states = self.decoder(hidden_states)
495
+ return hidden_states
496
+
497
+
498
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->VisualBert
499
+ class VisualBertPreTrainingHeads(nn.Module):
500
+ def __init__(self, config):
501
+ super().__init__()
502
+ self.predictions = VisualBertLMPredictionHead(config)
503
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
504
+
505
+ def forward(self, sequence_output, pooled_output):
506
+ prediction_scores = self.predictions(sequence_output)
507
+ seq_relationship_score = self.seq_relationship(pooled_output)
508
+ return prediction_scores, seq_relationship_score
509
+
510
+
511
+ class VisualBertPreTrainedModel(PreTrainedModel):
512
+ """
513
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
514
+ models.
515
+ """
516
+
517
+ config_class = VisualBertConfig
518
+ base_model_prefix = "visual_bert"
519
+ supports_gradient_checkpointing = True
520
+
521
+ def _init_weights(self, module):
522
+ """Initialize the weights"""
523
+ if isinstance(module, (nn.Linear, nn.Embedding)):
524
+ # Slightly different from the TF version which uses truncated_normal for initialization
525
+ # cf https://github.com/pytorch/pytorch/pull/5617
526
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
527
+
528
+ elif isinstance(module, nn.LayerNorm):
529
+ module.bias.data.zero_()
530
+ module.weight.data.fill_(1.0)
531
+ if isinstance(module, nn.Linear) and module.bias is not None:
532
+ module.bias.data.zero_()
533
+
534
+
535
+ @dataclass
536
+ class VisualBertForPreTrainingOutput(ModelOutput):
537
+ """
538
+ Output type of [`VisualBertForPreTraining`].
539
+
540
+ Args:
541
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
542
+ Total loss as the sum of the masked language modeling loss and the sentence-image prediction
543
+ (classification) loss.
544
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
545
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
546
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
547
+ Prediction scores of the sentence-image prediction (classification) head (scores of True/False continuation
548
+ before SoftMax).
549
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
550
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
551
+ shape `(batch_size, sequence_length, hidden_size)`.
552
+
553
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
554
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
555
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
556
+ sequence_length)`.
557
+
558
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
559
+ heads.
560
+ """
561
+
562
+ loss: Optional[torch.FloatTensor] = None
563
+ prediction_logits: torch.FloatTensor = None
564
+ seq_relationship_logits: torch.FloatTensor = None
565
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
566
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
567
+
568
+
569
+ VISUAL_BERT_START_DOCSTRING = r"""
570
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
571
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
572
+ etc.)
573
+
574
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
575
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
576
+ and behavior.
577
+
578
+ Parameters:
579
+ config ([`VisualBertConfig`]): Model configuration class with all the parameters of the model.
580
+ Initializing with a config file does not load the weights associated with the model, only the
581
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
582
+ """
583
+
584
+ VISUAL_BERT_INPUTS_DOCSTRING = r"""
585
+ Args:
586
+ input_ids (`torch.LongTensor` of shape `({0})`):
587
+ Indices of input sequence tokens in the vocabulary.
588
+
589
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
590
+ [`PreTrainedTokenizer.__call__`] for details.
591
+
592
+ [What are input IDs?](../glossary#input-ids)
593
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
594
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
595
+
596
+ - 1 for tokens that are **not masked**,
597
+ - 0 for tokens that are **masked**.
598
+
599
+ [What are attention masks?](../glossary#attention-mask)
600
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
601
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
602
+ 1]`:
603
+
604
+ - 0 corresponds to a *sentence A* token,
605
+ - 1 corresponds to a *sentence B* token.
606
+
607
+ [What are token type IDs?](../glossary#token-type-ids)
608
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
609
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
610
+ config.max_position_embeddings - 1]`.
611
+
612
+ [What are position IDs?](../glossary#position-ids)
613
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
614
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
615
+
616
+ - 1 indicates the head is **not masked**,
617
+ - 0 indicates the head is **masked**.
618
+
619
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
620
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
621
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
622
+ model's internal embedding lookup matrix.
623
+
624
+ visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*):
625
+ The embedded representation of the visual inputs, generally derived using an object detector.
626
+
627
+ visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*):
628
+ Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`:
629
+
630
+ - 1 for tokens that are **not masked**,
631
+ - 0 for tokens that are **masked**.
632
+
633
+ [What are attention masks?](../glossary#attention-mask)
634
+ visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*):
635
+ Segment token indices to indicate different portions of the visual embeds.
636
+
637
+ [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the
638
+ *visual_token_type_ids* to *1* for all tokens.
639
+
640
+ image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*):
641
+ Image-text alignment used to decide the position IDs of the visual embeddings.
642
+
643
+ output_attentions (`bool`, *optional*):
644
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
645
+ tensors for more detail.
646
+ output_hidden_states (`bool`, *optional*):
647
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
648
+ more detail.
649
+ return_dict (`bool`, *optional*):
650
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
651
+ """
652
+
653
+
654
+ @add_start_docstrings(
655
+ "The bare VisualBert Model transformer outputting raw hidden-states without any specific head on top.",
656
+ VISUAL_BERT_START_DOCSTRING,
657
+ )
658
+ class VisualBertModel(VisualBertPreTrainedModel):
659
+ """
660
+
661
+ The model can behave as an encoder (with only self-attention) following the architecture described in [Attention is
662
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
663
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
664
+ """
665
+
666
+ def __init__(self, config, add_pooling_layer=True):
667
+ super().__init__(config)
668
+ self.config = config
669
+
670
+ self.embeddings = VisualBertEmbeddings(config)
671
+ self.encoder = VisualBertEncoder(config)
672
+
673
+ self.pooler = VisualBertPooler(config) if add_pooling_layer else None
674
+
675
+ self.bypass_transformer = config.bypass_transformer
676
+
677
+ if self.bypass_transformer:
678
+ self.additional_layer = VisualBertLayer(config)
679
+
680
+ # Initialize weights and apply final processing
681
+ self.post_init()
682
+
683
+ def get_input_embeddings(self):
684
+ return self.embeddings.word_embeddings
685
+
686
+ def set_input_embeddings(self, value):
687
+ self.embeddings.word_embeddings = value
688
+
689
+ def _prune_heads(self, heads_to_prune):
690
+ """
691
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
692
+ class PreTrainedModel
693
+ """
694
+ for layer, heads in heads_to_prune.items():
695
+ self.encoder.layer[layer].attention.prune_heads(heads)
696
+
697
+ @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
698
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
699
+ def forward(
700
+ self,
701
+ input_ids: Optional[torch.LongTensor] = None,
702
+ attention_mask: Optional[torch.LongTensor] = None,
703
+ token_type_ids: Optional[torch.LongTensor] = None,
704
+ position_ids: Optional[torch.LongTensor] = None,
705
+ head_mask: Optional[torch.LongTensor] = None,
706
+ inputs_embeds: Optional[torch.FloatTensor] = None,
707
+ visual_embeds: Optional[torch.FloatTensor] = None,
708
+ visual_attention_mask: Optional[torch.LongTensor] = None,
709
+ visual_token_type_ids: Optional[torch.LongTensor] = None,
710
+ image_text_alignment: Optional[torch.LongTensor] = None,
711
+ output_attentions: Optional[bool] = None,
712
+ output_hidden_states: Optional[bool] = None,
713
+ return_dict: Optional[bool] = None,
714
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
715
+ r"""
716
+
717
+ Returns:
718
+
719
+ Example:
720
+
721
+ ```python
722
+ # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image.
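+ # (a hypothetical helper, e.g. one that runs a pretrained object detector over the image and
+ # returns its pooled region features with shape (visual_seq_length, visual_embedding_dim))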
723
+ from transformers import AutoTokenizer, VisualBertModel
724
+ import torch
725
+
726
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
727
+ model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
728
+
729
+ inputs = tokenizer("The capital of France is Paris.", return_tensors="pt")
730
+ visual_embeds = get_visual_embeddings(image).unsqueeze(0)
731
+ visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
732
+ visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
733
+
734
+ inputs.update(
735
+ {
736
+ "visual_embeds": visual_embeds,
737
+ "visual_token_type_ids": visual_token_type_ids,
738
+ "visual_attention_mask": visual_attention_mask,
739
+ }
740
+ )
741
+
742
+ outputs = model(**inputs)
743
+
744
+ last_hidden_states = outputs.last_hidden_state
745
+ ```"""
746
+
747
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
748
+ output_hidden_states = (
749
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
750
+ )
751
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
752
+
753
+ if input_ids is not None and inputs_embeds is not None:
754
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
755
+ elif input_ids is not None:
756
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
757
+ input_shape = input_ids.size()
758
+ elif inputs_embeds is not None:
759
+ input_shape = inputs_embeds.size()[:-1]
760
+ else:
761
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
762
+
763
+ batch_size, seq_length = input_shape
764
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
765
+
766
+ if visual_embeds is not None:
767
+ visual_input_shape = visual_embeds.size()[:-1]
768
+
769
+ if attention_mask is None:
770
+ attention_mask = torch.ones(input_shape, device=device)
771
+
772
+ if visual_embeds is not None and visual_attention_mask is None:
773
+ visual_attention_mask = torch.ones(visual_input_shape, device=device)
774
+
775
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
776
+ # ourselves in which case we just need to make it broadcastable to all heads.
777
+ if visual_embeds is not None:
778
+ combined_attention_mask = torch.cat((attention_mask, visual_attention_mask), dim=-1)
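+ # The combined mask covers the text tokens followed by the visual tokens, i.e. it has shape
+ # (batch_size, seq_length + visual_seq_length).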
779
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
780
+ combined_attention_mask, (batch_size, input_shape + visual_input_shape)
781
+ )
782
+
783
+ else:
784
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
785
+ attention_mask, (batch_size, input_shape)
786
+ )
787
+
788
+ # Prepare head mask if needed
789
+ # 1.0 in head_mask indicate we keep the head
790
+ # attention_probs has shape bsz x n_heads x N x N
791
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
792
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
793
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
794
+
795
+ embedding_output = self.embeddings(
796
+ input_ids=input_ids,
797
+ position_ids=position_ids,
798
+ token_type_ids=token_type_ids,
799
+ inputs_embeds=inputs_embeds,
800
+ visual_embeds=visual_embeds,
801
+ visual_token_type_ids=visual_token_type_ids,
802
+ image_text_alignment=image_text_alignment,
803
+ )
804
+
805
+ if self.bypass_transformer and visual_embeds is not None:
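+ # With `bypass_transformer`, only the text embeddings are run through the full encoder; the
+ # visual embeddings are concatenated back afterwards and processed by a single additional layer.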
806
+ text_length = input_ids.size(1)
807
+ text_embedding_output = embedding_output[:, :text_length, :]
808
+ visual_embedding_output = embedding_output[:, text_length:, :]
809
+
810
+ text_extended_attention_mask = extended_attention_mask[:, :, text_length, :text_length]
811
+
812
+ encoded_outputs = self.encoder(
813
+ text_embedding_output,
814
+ attention_mask=text_extended_attention_mask,
815
+ output_attentions=output_attentions,
816
+ output_hidden_states=output_hidden_states,
817
+ return_dict=return_dict,
818
+ )
819
+ sequence_output = encoded_outputs[0]
820
+ concatenated_input = torch.cat((sequence_output, visual_embedding_output), dim=1)
821
+ sequence_output = self.additional_layer(concatenated_input, extended_attention_mask)
822
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
823
+
824
+ else:
825
+ encoder_outputs = self.encoder(
826
+ embedding_output,
827
+ attention_mask=extended_attention_mask,
828
+ head_mask=head_mask,
829
+ output_attentions=output_attentions,
830
+ output_hidden_states=output_hidden_states,
831
+ return_dict=return_dict,
832
+ )
833
+ sequence_output = encoder_outputs[0]
834
+
835
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
836
+
837
+ if not return_dict:
838
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
839
+
840
+ return BaseModelOutputWithPooling(
841
+ last_hidden_state=sequence_output,
842
+ pooler_output=pooled_output,
843
+ hidden_states=encoder_outputs.hidden_states,
844
+ attentions=encoder_outputs.attentions,
845
+ )
846
+
847
+
848
+ @add_start_docstrings(
849
+ """
850
+ VisualBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
851
+ `sentence-image prediction (classification)` head.
852
+ """,
853
+ VISUAL_BERT_START_DOCSTRING,
854
+ )
855
+ class VisualBertForPreTraining(VisualBertPreTrainedModel):
856
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
857
+
858
+ def __init__(self, config):
859
+ super().__init__(config)
860
+
861
+ self.visual_bert = VisualBertModel(config)
862
+ self.cls = VisualBertPreTrainingHeads(config)
863
+
864
+ # Initialize weights and apply final processing
865
+ self.post_init()
866
+
867
+ def get_output_embeddings(self):
868
+ return self.cls.predictions.decoder
869
+
870
+ def set_output_embeddings(self, new_embeddings):
871
+ self.cls.predictions.decoder = new_embeddings
872
+
873
+ @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
874
+ @replace_return_docstrings(output_type=VisualBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
875
+ def forward(
876
+ self,
877
+ input_ids: Optional[torch.LongTensor] = None,
878
+ attention_mask: Optional[torch.LongTensor] = None,
879
+ token_type_ids: Optional[torch.LongTensor] = None,
880
+ position_ids: Optional[torch.LongTensor] = None,
881
+ head_mask: Optional[torch.LongTensor] = None,
882
+ inputs_embeds: Optional[torch.FloatTensor] = None,
883
+ visual_embeds: Optional[torch.FloatTensor] = None,
884
+ visual_attention_mask: Optional[torch.LongTensor] = None,
885
+ visual_token_type_ids: Optional[torch.LongTensor] = None,
886
+ image_text_alignment: Optional[torch.LongTensor] = None,
887
+ output_attentions: Optional[bool] = None,
888
+ output_hidden_states: Optional[bool] = None,
889
+ return_dict: Optional[bool] = None,
890
+ labels: Optional[torch.LongTensor] = None,
891
+ sentence_image_labels: Optional[torch.LongTensor] = None,
892
+ ) -> Union[Tuple[torch.Tensor], VisualBertForPreTrainingOutput]:
893
+ r"""
894
+ labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):
895
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
896
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
897
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
898
+ sentence_image_labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
899
+ Labels for computing the sentence-image prediction (classification) loss. Input should be a sequence pair
900
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
901
+
902
+ - 0 indicates sequence B is a matching pair of sequence A for the given image,
903
+ - 1 indicates sequence B is a random sequence w.r.t A for the given image.
904
+
905
+ Returns:
906
+
907
+ Example:
908
+
909
+ ```python
910
+ # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
911
+ from transformers import AutoTokenizer, VisualBertForPreTraining
912
+
913
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
914
+ model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
915
+
916
+ inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
917
+ visual_embeds = get_visual_embeddings(image).unsqueeze(0)
918
+ visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
919
+ visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
920
+
921
+ inputs.update(
922
+ {
923
+ "visual_embeds": visual_embeds,
924
+ "visual_token_type_ids": visual_token_type_ids,
925
+ "visual_attention_mask": visual_attention_mask,
926
+ }
927
+ )
928
+ max_length = inputs["input_ids"].shape[-1] + visual_embeds.shape[-2]
929
+ labels = tokenizer(
930
+ "The capital of France is Paris.", return_tensors="pt", padding="max_length", max_length=max_length
931
+ )["input_ids"]
932
+ sentence_image_labels = torch.tensor(1).unsqueeze(0) # Batch size 1
933
+
934
+
935
+ outputs = model(**inputs, labels=labels, sentence_image_labels=sentence_image_labels)
936
+ loss = outputs.loss
937
+ prediction_logits = outputs.prediction_logits
938
+ seq_relationship_logits = outputs.seq_relationship_logits
939
+ ```"""
940
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
941
+
942
+ outputs = self.visual_bert(
943
+ input_ids,
944
+ attention_mask=attention_mask,
945
+ token_type_ids=token_type_ids,
946
+ position_ids=position_ids,
947
+ head_mask=head_mask,
948
+ inputs_embeds=inputs_embeds,
949
+ visual_embeds=visual_embeds,
950
+ visual_attention_mask=visual_attention_mask,
951
+ visual_token_type_ids=visual_token_type_ids,
952
+ image_text_alignment=image_text_alignment,
953
+ output_attentions=output_attentions,
954
+ output_hidden_states=output_hidden_states,
955
+ return_dict=return_dict,
956
+ )
957
+
958
+ sequence_output, pooled_output = outputs[:2]
959
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
960
+
961
+ total_loss = None
962
+ if labels is not None and sentence_image_labels is not None:
963
+ total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
964
+ if labels.size(-1) != total_size:
965
+ raise ValueError(
966
+ "The labels provided should have same sequence length as total attention mask. "
967
+ f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
968
+ )
969
+
970
+ loss_fct = CrossEntropyLoss()
971
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
972
+ sentence_image_loss = loss_fct(seq_relationship_score.view(-1, 2), sentence_image_labels.view(-1))
973
+ total_loss = masked_lm_loss + sentence_image_loss
974
+
975
+ if labels is not None and sentence_image_labels is None:
976
+ total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
977
+ if labels.size(-1) != total_size:
978
+ raise ValueError(
979
+ "The labels provided should have same sequence length as total attention mask. "
980
+ f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
981
+ )
982
+
983
+ loss_fct = CrossEntropyLoss()
984
+ total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
985
+
986
+ if not return_dict:
987
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
988
+ return ((total_loss,) + output) if total_loss is not None else output
989
+
990
+ return VisualBertForPreTrainingOutput(
991
+ loss=total_loss,
992
+ prediction_logits=prediction_scores,
993
+ seq_relationship_logits=seq_relationship_score,
994
+ hidden_states=outputs.hidden_states,
995
+ attentions=outputs.attentions,
996
+ )
997
+
998
+
999
+ @add_start_docstrings(
1000
+ """
1001
+ VisualBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1002
+ a softmax) e.g. for VCR tasks.
1003
+ """,
1004
+ VISUAL_BERT_START_DOCSTRING,
1005
+ )
1006
+ class VisualBertForMultipleChoice(VisualBertPreTrainedModel):
1007
+ def __init__(self, config):
1008
+ super().__init__(config)
1009
+
1010
+ self.visual_bert = VisualBertModel(config)
1011
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1012
+ self.cls = nn.Linear(config.hidden_size, 1)
1013
+
1014
+ # Initialize weights and apply final processing
1015
+ self.post_init()
1016
+
1017
+ @add_start_docstrings_to_model_forward(
1018
+ VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1019
+ )
1020
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
1021
+ def forward(
1022
+ self,
1023
+ input_ids: Optional[torch.LongTensor] = None,
1024
+ attention_mask: Optional[torch.LongTensor] = None,
1025
+ token_type_ids: Optional[torch.LongTensor] = None,
1026
+ position_ids: Optional[torch.LongTensor] = None,
1027
+ head_mask: Optional[torch.LongTensor] = None,
1028
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1029
+ visual_embeds: Optional[torch.FloatTensor] = None,
1030
+ visual_attention_mask: Optional[torch.LongTensor] = None,
1031
+ visual_token_type_ids: Optional[torch.LongTensor] = None,
1032
+ image_text_alignment: Optional[torch.LongTensor] = None,
1033
+ output_attentions: Optional[bool] = None,
1034
+ output_hidden_states: Optional[bool] = None,
1035
+ return_dict: Optional[bool] = None,
1036
+ labels: Optional[torch.LongTensor] = None,
1037
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1038
+ r"""
1039
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1040
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1041
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1042
+ `input_ids` above)
1043
+
1044
+ Returns:
1045
+
1046
+ Example:
1047
+
1048
+ ```python
1049
+ # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
1050
+ from transformers import AutoTokenizer, VisualBertForMultipleChoice
1051
+ import torch
1052
+
1053
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1054
+ model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr")
1055
+
1056
+ prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1057
+ choice0 = "It is eaten with a fork and a knife."
1058
+ choice1 = "It is eaten while held in the hand."
1059
+
1060
+ visual_embeds = get_visual_embeddings(image)
1061
+ # (batch_size, num_choices, visual_seq_length, visual_embedding_dim)
1062
+ visual_embeds = visual_embeds.expand(1, 2, *visual_embeds.shape)
1063
+ visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
1064
+ visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
1065
+
1066
+ labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
1067
+
1068
+ encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors="pt", padding=True)
1069
+ # batch size is 1
1070
+ inputs_dict = {k: v.unsqueeze(0) for k, v in encoding.items()}
1071
+ inputs_dict.update(
1072
+ {
1073
+ "visual_embeds": visual_embeds,
1074
+ "visual_attention_mask": visual_attention_mask,
1075
+ "visual_token_type_ids": visual_token_type_ids,
1076
+ "labels": labels,
1077
+ }
1078
+ )
1079
+ outputs = model(**inputs_dict)
1080
+
1081
+ loss = outputs.loss
1082
+ logits = outputs.logits
1083
+ ```"""
1084
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1085
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
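+ # Flatten the (batch_size, num_choices, ...) inputs to (batch_size * num_choices, ...) so every
+ # choice is scored independently; the logits are reshaped back to (batch_size, num_choices) below.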
1086
+
1087
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1088
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1089
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1090
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1091
+ inputs_embeds = (
1092
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1093
+ if inputs_embeds is not None
1094
+ else None
1095
+ )
1096
+
1097
+ visual_embeds = (
1098
+ visual_embeds.view(-1, visual_embeds.size(-2), visual_embeds.size(-1))
1099
+ if visual_embeds is not None
1100
+ else None
1101
+ )
1102
+ visual_attention_mask = (
1103
+ visual_attention_mask.view(-1, visual_attention_mask.size(-1))
1104
+ if visual_attention_mask is not None
1105
+ else None
1106
+ )
1107
+ visual_token_type_ids = (
1108
+ visual_token_type_ids.view(-1, visual_token_type_ids.size(-1))
1109
+ if visual_token_type_ids is not None
1110
+ else None
1111
+ )
1112
+
1113
+ outputs = self.visual_bert(
1114
+ input_ids,
1115
+ attention_mask=attention_mask,
1116
+ token_type_ids=token_type_ids,
1117
+ position_ids=position_ids,
1118
+ head_mask=head_mask,
1119
+ inputs_embeds=inputs_embeds,
1120
+ visual_embeds=visual_embeds,
1121
+ visual_attention_mask=visual_attention_mask,
1122
+ visual_token_type_ids=visual_token_type_ids,
1123
+ image_text_alignment=image_text_alignment,
1124
+ output_attentions=output_attentions,
1125
+ output_hidden_states=output_hidden_states,
1126
+ return_dict=return_dict,
1127
+ )
1128
+
1129
+ _, pooled_output = outputs[0], outputs[1]
1130
+
1131
+ pooled_output = self.dropout(pooled_output)
1132
+ logits = self.cls(pooled_output)
1133
+ reshaped_logits = logits.view(-1, num_choices)
1134
+
1135
+ loss = None
1136
+ if labels is not None:
1137
+ loss_fct = CrossEntropyLoss()
1138
+ loss = loss_fct(reshaped_logits, labels)
1139
+
1140
+ if not return_dict:
1141
+ output = (reshaped_logits,) + outputs[2:]
1142
+ return ((loss,) + output) if loss is not None else output
1143
+
1144
+ return MultipleChoiceModelOutput(
1145
+ loss=loss,
1146
+ logits=reshaped_logits,
1147
+ hidden_states=outputs.hidden_states,
1148
+ attentions=outputs.attentions,
1149
+ )
1150
+
1151
+
1152
+ @add_start_docstrings(
1153
+ """
1154
+ VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled
1155
+ output) for VQA.
1156
+ """,
1157
+ VISUAL_BERT_START_DOCSTRING,
1158
+ )
1159
+ class VisualBertForQuestionAnswering(VisualBertPreTrainedModel):
1160
+ def __init__(self, config):
1161
+ super().__init__(config)
1162
+ self.num_labels = config.num_labels
1163
+
1164
+ self.visual_bert = VisualBertModel(config)
1165
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1166
+ self.cls = nn.Linear(config.hidden_size, config.num_labels)
1167
+
1168
+ # Initialize weights and apply final processing
1169
+ self.post_init()
1170
+
1171
+ @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1172
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1173
+ def forward(
1174
+ self,
1175
+ input_ids: Optional[torch.LongTensor] = None,
1176
+ attention_mask: Optional[torch.LongTensor] = None,
1177
+ token_type_ids: Optional[torch.LongTensor] = None,
1178
+ position_ids: Optional[torch.LongTensor] = None,
1179
+ head_mask: Optional[torch.LongTensor] = None,
1180
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1181
+ visual_embeds: Optional[torch.FloatTensor] = None,
1182
+ visual_attention_mask: Optional[torch.LongTensor] = None,
1183
+ visual_token_type_ids: Optional[torch.LongTensor] = None,
1184
+ image_text_alignment: Optional[torch.LongTensor] = None,
1185
+ output_attentions: Optional[bool] = None,
1186
+ output_hidden_states: Optional[bool] = None,
1187
+ return_dict: Optional[bool] = None,
1188
+ labels: Optional[torch.LongTensor] = None,
1189
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1190
+ r"""
1191
+ labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):
1192
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1193
+ config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits.
1194
+
1195
+ Returns:
1196
+
1197
+ Example:
1198
+
1199
+ ```python
1200
+ # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
1201
+ from transformers import AutoTokenizer, VisualBertForQuestionAnswering
1202
+ import torch
1203
+
1204
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1205
+ model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")
1206
+
1207
+ text = "Who is eating the apple?"
1208
+ inputs = tokenizer(text, return_tensors="pt")
1209
+ visual_embeds = get_visual_embeddings(image).unsqueeze(0)
1210
+ visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
1211
+ visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
1212
+
1213
+ inputs.update(
1214
+ {
1215
+ "visual_embeds": visual_embeds,
1216
+ "visual_token_type_ids": visual_token_type_ids,
1217
+ "visual_attention_mask": visual_attention_mask,
1218
+ }
1219
+ )
1220
+
1221
+ labels = torch.tensor([[0.0, 1.0]]).unsqueeze(0) # Batch size 1, Num labels 2
1222
+
1223
+ outputs = model(**inputs, labels=labels)
1224
+ loss = outputs.loss
1225
+ scores = outputs.logits
1226
+ ```"""
1227
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1228
+
1229
+ # Get the index of the last text token
1230
+ index_to_gather = attention_mask.sum(1) - 2 # as in original code
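+ # attention_mask.sum(1) counts the non-padding text tokens, so subtracting 2 presumably points at
+ # the last content token before the final [SEP].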
1231
+
1232
+ outputs = self.visual_bert(
1233
+ input_ids,
1234
+ attention_mask=attention_mask,
1235
+ token_type_ids=token_type_ids,
1236
+ position_ids=position_ids,
1237
+ head_mask=head_mask,
1238
+ inputs_embeds=inputs_embeds,
1239
+ visual_embeds=visual_embeds,
1240
+ visual_attention_mask=visual_attention_mask,
1241
+ visual_token_type_ids=visual_token_type_ids,
1242
+ image_text_alignment=image_text_alignment,
1243
+ output_attentions=output_attentions,
1244
+ output_hidden_states=output_hidden_states,
1245
+ return_dict=return_dict,
1246
+ )
1247
+
1248
+ sequence_output = outputs[0]
1249
+
1250
+ # TO-CHECK: From the original code
1251
+ index_to_gather = (
1252
+ index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1))
1253
+ )
1254
+ pooled_output = torch.gather(sequence_output, 1, index_to_gather)
1255
+
1256
+ pooled_output = self.dropout(pooled_output)
1257
+ logits = self.cls(pooled_output)
1258
+ reshaped_logits = logits.view(-1, self.num_labels)
1259
+
1260
+ loss = None
1261
+ if labels is not None:
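+ # The labels are treated as a target distribution over `num_labels` answers (see the example
+ # above), so the loss is the KL divergence between the log-softmaxed logits and that target.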
1262
+ loss_fct = nn.KLDivLoss(reduction="batchmean")
1263
+ log_softmax = nn.LogSoftmax(dim=-1)
1264
+ reshaped_logits = log_softmax(reshaped_logits)
1265
+ loss = loss_fct(reshaped_logits, labels.contiguous())
1266
+ if not return_dict:
1267
+ output = (reshaped_logits,) + outputs[2:]
1268
+ return ((loss,) + output) if loss is not None else output
1269
+
1270
+ return SequenceClassifierOutput(
1271
+ loss=loss,
1272
+ logits=reshaped_logits,
1273
+ hidden_states=outputs.hidden_states,
1274
+ attentions=outputs.attentions,
1275
+ )
1276
+
1277
+
1278
+ @add_start_docstrings(
1279
+ """
1280
+ VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled
1281
+ output) for Visual Reasoning e.g. for NLVR task.
1282
+ """,
1283
+ VISUAL_BERT_START_DOCSTRING,
1284
+ )
1285
+ class VisualBertForVisualReasoning(VisualBertPreTrainedModel):
1286
+ def __init__(self, config):
1287
+ super().__init__(config)
1288
+ self.num_labels = config.num_labels
1289
+
1290
+ self.visual_bert = VisualBertModel(config)
1291
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1292
+ self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2
1293
+
1294
+ # Initialize weights and apply final processing
1295
+ self.post_init()
1296
+
1297
+ @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1298
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1299
+ def forward(
1300
+ self,
1301
+ input_ids: Optional[torch.LongTensor] = None,
1302
+ attention_mask: Optional[torch.LongTensor] = None,
1303
+ token_type_ids: Optional[torch.LongTensor] = None,
1304
+ position_ids: Optional[torch.LongTensor] = None,
1305
+ head_mask: Optional[torch.LongTensor] = None,
1306
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1307
+ visual_embeds: Optional[torch.FloatTensor] = None,
1308
+        visual_attention_mask: Optional[torch.LongTensor] = None,
+        visual_token_type_ids: Optional[torch.LongTensor] = None,
+        image_text_alignment: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[torch.LongTensor] = None,
+    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. A classification loss is computed (Cross-Entropy) against these labels.
+
+        Returns:
+
+        Example:
+
+        ```python
+        # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
+        from transformers import AutoTokenizer, VisualBertForVisualReasoning
+        import torch
+
+        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+        model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2")
+
+        text = "Who is eating the apple?"
+        inputs = tokenizer(text, return_tensors="pt")
+        visual_embeds = get_visual_embeddings(image).unsqueeze(0)
+        visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
+        visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
+
+        inputs.update(
+            {
+                "visual_embeds": visual_embeds,
+                "visual_token_type_ids": visual_token_type_ids,
+                "visual_attention_mask": visual_attention_mask,
+            }
+        )
+
+        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1, Num choices 2
+
+        outputs = model(**inputs, labels=labels)
+        loss = outputs.loss
+        scores = outputs.logits
+        ```"""
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.visual_bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            visual_embeds=visual_embeds,
+            visual_attention_mask=visual_attention_mask,
+            visual_token_type_ids=visual_token_type_ids,
+            image_text_alignment=image_text_alignment,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        # sequence_output = outputs[0]
+        pooled_output = outputs[1]
+        pooled_output = self.dropout(pooled_output)
+        logits = self.cls(pooled_output)
+        reshaped_logits = logits.contiguous()
+
+        loss = None
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(reshaped_logits, labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutput(
+            loss=loss,
+            logits=reshaped_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
+class VisualBertRegionToPhraseAttention(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        if config.hidden_size % config.num_attention_heads != 0:
+            raise ValueError(
+                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+                f"heads ({config.num_attention_heads})"
+            )
+        self.num_attention_heads = 1  # config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        self.key = nn.Linear(config.hidden_size, self.all_head_size)
+        self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+    def transpose_for_scores(self, x):
+        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+        x = x.view(*new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward(self, query, key, attention_mask):
+        attention_mask = attention_mask.to(query.dtype)
+        attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+        attention_mask = (1.0 - attention_mask) * torch.finfo(query.dtype).min
+
+        mixed_query_layer = self.query(query)
+        mixed_key_layer = self.key(key)
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+        key_layer = self.transpose_for_scores(mixed_key_layer)
+
+        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+        attention_scores = attention_scores + attention_mask
+
+        attention_scores = attention_scores.squeeze(1)
+        return attention_scores
+
+
+@add_start_docstrings(
+    """
+    VisualBert Model with a Masked Language Modeling head and an attention layer on top for Region-to-Phrase Alignment
+    e.g. for Flickr30 Entities task.
+    """,
+    VISUAL_BERT_START_DOCSTRING,
+)
+class VisualBertForRegionToPhraseAlignment(VisualBertPreTrainedModel):
+    _tied_weights_keys = ["cls.predictions.decoder.bias"]
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.visual_bert = VisualBertModel(config)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+        self.cls = VisualBertPreTrainingHeads(config)
+        self.attention = VisualBertRegionToPhraseAttention(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.LongTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.LongTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        visual_embeds: Optional[torch.FloatTensor] = None,
+        visual_attention_mask: Optional[torch.LongTensor] = None,
+        visual_token_type_ids: Optional[torch.LongTensor] = None,
+        image_text_alignment: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        region_to_phrase_position: Optional[torch.LongTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+        r"""
+        region_to_phrase_position (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):
+            The positions depicting the position of the image embedding corresponding to the textual tokens.
+
+        labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length, visual_sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and the
+            outputs from the attention layer.
+
+        Returns:
+
+        Example:
+
+        ```python
+        # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
+        from transformers import AutoTokenizer, VisualBertForRegionToPhraseAlignment
+        import torch
+
+        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+        model = VisualBertForRegionToPhraseAlignment.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
+
+        text = "Who is eating the apple?"
+        inputs = tokenizer(text, return_tensors="pt")
+        visual_embeds = get_visual_embeddings(image).unsqueeze(0)
+        visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
+        visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
+        region_to_phrase_position = torch.ones((1, inputs["input_ids"].shape[-1] + visual_embeds.shape[-2]))
+
+        inputs.update(
+            {
+                "region_to_phrase_position": region_to_phrase_position,
+                "visual_embeds": visual_embeds,
+                "visual_token_type_ids": visual_token_type_ids,
+                "visual_attention_mask": visual_attention_mask,
+            }
+        )
+
+        labels = torch.ones(
+            (1, inputs["input_ids"].shape[-1] + visual_embeds.shape[-2], visual_embeds.shape[-2])
+        )  # Batch size 1
+
+        outputs = model(**inputs, labels=labels)
+        loss = outputs.loss
+        scores = outputs.logits
+        ```"""
+        if region_to_phrase_position is None:
+            raise ValueError("`region_to_phrase_position` should not be None when using Flickr Model.")
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.visual_bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            visual_embeds=visual_embeds,
+            visual_attention_mask=visual_attention_mask,
+            visual_token_type_ids=visual_token_type_ids,
+            image_text_alignment=image_text_alignment,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+
+        region_to_phrase_position_mask = (region_to_phrase_position != -1).long()
+
+        # Make the -1 become 0
+        region_to_phrase_position = region_to_phrase_position * region_to_phrase_position_mask
+
+        # Selected_positions = batch x selected position x dim
+        expanded_region_to_phrase_positions = region_to_phrase_position.unsqueeze(2).expand(
+            region_to_phrase_position.size(0), region_to_phrase_position.size(1), sequence_output.size(2)
+        )
+        selected_positions = sequence_output.gather(1, expanded_region_to_phrase_positions)
+
+        # Visual Features = batch x visual_feature_length x dim
+        # This will need separate image and visual masks.
+        visual_features = sequence_output[:, attention_mask.size(1) :]
+
+        if visual_features.size(1) != visual_attention_mask.size(1):
+            raise ValueError(
+                f"Visual features length :{visual_features.size(1)} should be the same"
+                f" as visual attention mask length: {visual_attention_mask.size(1)}."
+            )
+
+        logits = self.attention(selected_positions, visual_features, visual_attention_mask)
+
+        loss = None
+
+        if labels is not None:
+            # scores = batch x selected position x visual_feature
+            # scores = selected_positions.bmm(visual_features.transpose(1,2))
+            # label = batch x selected_postion x needed position
+            loss_fct = KLDivLoss(reduction="batchmean")
+            log_softmax = LogSoftmax(dim=-1)
+            scores = log_softmax(logits)
+            labels = labels.contiguous()
+            loss = loss_fct(scores, labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
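
Reviewer note, not part of the diff: the docstring examples above rely on an external `get_visual_embeddings` helper, so here is a minimal self-contained sketch of what the region-to-phrase scoring and KLDivLoss target look like in plain PyTorch. Shapes, dimensions, and the single projection layers are illustrative assumptions standing in for the module's configured `query`/`key` heads, not values from any checkpoint.

```python
# Minimal sketch (assumed shapes): single-head scaled dot-product scores between selected
# text positions (queries) and visual features (keys), with an additive mask for padded
# regions, trained against a per-phrase distribution over regions via KLDivLoss.
import math

import torch
from torch import nn

batch, num_phrases, num_regions, hidden = 2, 5, 7, 16

query = nn.Linear(hidden, hidden)  # stands in for self.query
key = nn.Linear(hidden, hidden)    # stands in for self.key

selected_positions = torch.randn(batch, num_phrases, hidden)  # gathered text states
visual_features = torch.randn(batch, num_regions, hidden)     # visual slice of sequence_output
visual_attention_mask = torch.ones(batch, num_regions)

# Additive mask: 0 for visible regions, a very large negative value for padded ones.
additive_mask = (1.0 - visual_attention_mask)[:, None, :] * torch.finfo(torch.float32).min

# (batch, num_phrases, num_regions) score matrix, scaled like the attention layer above.
scores = query(selected_positions) @ key(visual_features).transpose(-1, -2)
scores = scores / math.sqrt(hidden) + additive_mask

# KLDivLoss expects log-probabilities as input and probabilities as target.
labels = torch.softmax(torch.randn(batch, num_phrases, num_regions), dim=-1)
loss = nn.KLDivLoss(reduction="batchmean")(nn.LogSoftmax(dim=-1)(scores), labels)
print(scores.shape, loss.item())
```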
llmeval-env/lib/python3.10/site-packages/transformers/models/vitmatte/__init__.py ADDED
@@ -0,0 +1,72 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_torch_available,
+    is_vision_available,
+)
+
+
+_import_structure = {"configuration_vitmatte": ["VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitMatteConfig"]}
+
+try:
+    if not is_vision_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["image_processing_vitmatte"] = ["VitMatteImageProcessor"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_vitmatte"] = [
+        "VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "VitMattePreTrainedModel",
+        "VitMatteForImageMatting",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_vitmatte import VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP, VitMatteConfig
+
+    try:
+        if not is_vision_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .image_processing_vitmatte import VitMatteImageProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_vitmatte import (
+            VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST,
+            VitMatteForImageMatting,
+            VitMattePreTrainedModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
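
Reviewer note, not part of the diff: a short sketch of how this lazy `__init__` is typically consumed downstream. The availability helpers are real `transformers.utils` functions; the guarded structure mirrors the try/except pattern above and is only an illustrative usage, not part of the module itself.

```python
# Sketch: importing the package is cheap; symbols are resolved via _LazyModule only when
# first accessed, so optional backends can be guarded explicitly.
from transformers.utils import is_torch_available, is_vision_available

if is_vision_available():
    from transformers import VitMatteImageProcessor  # resolved lazily on first access

    processor = VitMatteImageProcessor()

if is_torch_available():
    from transformers import VitMatteForImageMatting  # only now is the torch modeling code imported
```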
llmeval-env/lib/python3.10/site-packages/transformers/models/vitmatte/image_processing_vitmatte.py ADDED
@@ -0,0 +1,286 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for ViTMatte."""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature
+from ...image_transforms import pad, to_channel_dimension_format
+from ...image_utils import (
+    IMAGENET_STANDARD_MEAN,
+    IMAGENET_STANDARD_STD,
+    ChannelDimension,
+    ImageInput,
+    get_image_size,
+    infer_channel_dimension_format,
+    is_scaled_image,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+    validate_kwargs,
+    validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class VitMatteImageProcessor(BaseImageProcessor):
+    r"""
+    Constructs a ViTMatte image processor.
+
+    Args:
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+            parameter in the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+            `preprocess` method.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+            method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+        do_pad (`bool`, *optional*, defaults to `True`):
+            Whether to pad the image to make the width and height divisible by `size_divisibility`. Can be overridden
+            by the `do_pad` parameter in the `preprocess` method.
+        size_divisibility (`int`, *optional*, defaults to 32):
+            The width and height of the image will be padded to be divisible by this number.
+    """
+
+    model_input_names = ["pixel_values"]
+
+    def __init__(
+        self,
+        do_rescale: bool = True,
+        rescale_factor: Union[int, float] = 1 / 255,
+        do_normalize: bool = True,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_pad: bool = True,
+        size_divisibility: int = 32,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        self.do_rescale = do_rescale
+        self.do_normalize = do_normalize
+        self.do_pad = do_pad
+        self.rescale_factor = rescale_factor
+        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+        self.size_divisibility = size_divisibility
+        self._valid_processor_keys = [
+            "images",
+            "trimaps",
+            "do_rescale",
+            "rescale_factor",
+            "do_normalize",
+            "image_mean",
+            "image_std",
+            "do_pad",
+            "size_divisibility",
+            "return_tensors",
+            "data_format",
+            "input_data_format",
+        ]
+
+    def pad_image(
+        self,
+        image: np.ndarray,
+        size_divisibility: int = 32,
+        data_format: Optional[Union[str, ChannelDimension]] = None,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+    ) -> np.ndarray:
+        """
+        Args:
+            image (`np.ndarray`):
+                Image to pad.
+            size_divisibility (`int`, *optional*, defaults to 32):
+                The width and height of the image will be padded to be divisible by this number.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        if input_data_format is None:
+            input_data_format = infer_channel_dimension_format(image)
+
+        height, width = get_image_size(image, input_data_format)
+
+        if height % size_divisibility != 0 or width % size_divisibility != 0:
+            pad_height = size_divisibility - height % size_divisibility
+            pad_width = size_divisibility - width % size_divisibility
+            padding = ((0, pad_height), (0, pad_width))
+            image = pad(image, padding=padding, data_format=data_format, input_data_format=input_data_format)
+
+        if data_format is not None:
+            image = to_channel_dimension_format(image, data_format, input_data_format)
+
+        return image
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        trimaps: ImageInput,
+        do_rescale: Optional[bool] = None,
+        rescale_factor: Optional[float] = None,
+        do_normalize: Optional[bool] = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_pad: Optional[bool] = None,
+        size_divisibility: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ):
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+            trimaps (`ImageInput`):
+                Trimap to preprocess.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image values between [0 - 1].
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean to use if `do_normalize` is set to `True`.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation to use if `do_normalize` is set to `True`.
+            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+                Whether to pad the image.
+            size_divisibility (`int`, *optional*, defaults to `self.size_divisibility`):
+                The size divisibility to pad the image to if `do_pad` is set to `True`.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - Unset: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        do_pad = do_pad if do_pad is not None else self.do_pad
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        size_divisibility = size_divisibility if size_divisibility is not None else self.size_divisibility
+
+        images = make_list_of_images(images)
+        trimaps = make_list_of_images(trimaps, expected_ndims=2)
+
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        if not valid_images(trimaps):
+            raise ValueError(
+                "Invalid trimap type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+        validate_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_pad=do_pad,
+            size_divisibility=size_divisibility,
+        )
+
+        # All transformations expect numpy arrays.
+        images = [to_numpy_array(image) for image in images]
+        trimaps = [to_numpy_array(trimap) for trimap in trimaps]
+
+        if is_scaled_image(images[0]) and do_rescale:
+            logger.warning_once(
+                "It looks like you are trying to rescale already rescaled images. If the input"
+                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+            )
+
+        if input_data_format is None:
+            # We assume that all images have the same channel dimension format.
+            input_data_format = infer_channel_dimension_format(images[0])
+
+        if do_rescale:
+            images = [
+                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                for image in images
+            ]
+            trimaps = [
+                self.rescale(image=trimap, scale=rescale_factor, input_data_format=input_data_format)
+                for trimap in trimaps
+            ]
+
+        if do_normalize:
+            images = [
+                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        # concatenate images and trimaps
+        images = [
+            np.concatenate([image, np.expand_dims(trimap, axis=-1)], axis=-1) for image, trimap in zip(images, trimaps)
+        ]
+
+        if do_pad:
+            images = [
+                self.pad_image(image, size_divisibility=size_divisibility, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        images = [
+            to_channel_dimension_format(image=image, channel_dim=data_format, input_channel_dim=input_data_format)
+            for image in images
+        ]
+
+        data = {"pixel_values": images}
+        return BatchFeature(data=data, tensor_type=return_tensors)
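
Reviewer note, not part of the diff: a minimal sketch exercising `preprocess` with random arrays standing in for a real image/trimap pair. The input sizes and the expected output shape are illustrative assumptions based on the defaults above (rescale, normalize, concatenate RGB + trimap into 4 channels, pad both spatial dims up to a multiple of 32).

```python
# Sketch: random stand-ins for an image and its trimap, run through the processor defaults.
import numpy as np

from transformers import VitMatteImageProcessor

processor = VitMatteImageProcessor()

image = np.random.randint(0, 256, (478, 638, 3), dtype=np.uint8)  # H x W x 3, uint8
trimap = np.random.randint(0, 256, (478, 638), dtype=np.uint8)    # H x W, uint8 (2D, as expected_ndims=2)

batch = processor.preprocess(images=image, trimaps=trimap, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected with these defaults: torch.Size([1, 4, 480, 640])
```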
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2/__init__.py ADDED
@@ -0,0 +1,134 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_tf_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
+    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
+    "processing_wav2vec2": ["Wav2Vec2Processor"],
+    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
+}
+
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_wav2vec2"] = [
+        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "Wav2Vec2ForAudioFrameClassification",
+        "Wav2Vec2ForCTC",
+        "Wav2Vec2ForMaskedLM",
+        "Wav2Vec2ForPreTraining",
+        "Wav2Vec2ForSequenceClassification",
+        "Wav2Vec2ForXVector",
+        "Wav2Vec2Model",
+        "Wav2Vec2PreTrainedModel",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_wav2vec2"] = [
+        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "TFWav2Vec2ForCTC",
+        "TFWav2Vec2Model",
+        "TFWav2Vec2PreTrainedModel",
+        "TFWav2Vec2ForSequenceClassification",
+    ]
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_wav2vec2"] = [
+        "FlaxWav2Vec2ForCTC",
+        "FlaxWav2Vec2ForPreTraining",
+        "FlaxWav2Vec2Model",
+        "FlaxWav2Vec2PreTrainedModel",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
+    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
+    from .processing_wav2vec2 import Wav2Vec2Processor
+    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_wav2vec2 import (
+            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
+            Wav2Vec2ForAudioFrameClassification,
+            Wav2Vec2ForCTC,
+            Wav2Vec2ForMaskedLM,
+            Wav2Vec2ForPreTraining,
+            Wav2Vec2ForSequenceClassification,
+            Wav2Vec2ForXVector,
+            Wav2Vec2Model,
+            Wav2Vec2PreTrainedModel,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_wav2vec2 import (
+            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
+            TFWav2Vec2ForCTC,
+            TFWav2Vec2ForSequenceClassification,
+            TFWav2Vec2Model,
+            TFWav2Vec2PreTrainedModel,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        # Fixed: the Flax classes live in modeling_flax_wav2vec2, not modeling_tf_wav2vec2.
+        from .modeling_flax_wav2vec2 import (
+            FlaxWav2Vec2ForCTC,
+            FlaxWav2Vec2ForPreTraining,
+            FlaxWav2Vec2Model,
+            FlaxWav2Vec2PreTrainedModel,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
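
Reviewer note, not part of the diff: the same CTC head is exposed under a per-backend class name, so consumers usually guard on the installed framework before importing. This is only an illustrative sketch; the checkpoint name is an example, not something this file depends on.

```python
# Sketch: pick whichever backend is installed, mirroring the guarded structure of the __init__ above.
from transformers.utils import is_flax_available, is_tf_available, is_torch_available

if is_torch_available():
    from transformers import Wav2Vec2ForCTC as CTCModel
elif is_tf_available():
    from transformers import TFWav2Vec2ForCTC as CTCModel
elif is_flax_available():
    from transformers import FlaxWav2Vec2ForCTC as CTCModel
else:
    raise ImportError("No supported backend (PyTorch, TensorFlow or Flax) is installed.")

model = CTCModel.from_pretrained("facebook/wav2vec2-base-960h")  # example checkpoint
```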