applied-ai-018 committed (verified)
Commit 83b2c57 · 1 Parent(s): 45a4c44

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. ckpts/universal/global_step40/zero/15.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/15.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/15.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/15.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step40/zero/17.input_layernorm.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step40/zero/17.input_layernorm.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  8. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py +138 -0
  9. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py +389 -0
  13. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py +1567 -0
  14. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +1522 -0
  15. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +1526 -0
  16. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py +240 -0
  17. venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py +120 -0
  18. venv/lib/python3.10/site-packages/transformers/models/canine/__init__.py +69 -0
  19. venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/convert_canine_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/modeling_canine.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/canine/configuration_canine.py +141 -0
  25. venv/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py +66 -0
  26. venv/lib/python3.10/site-packages/transformers/models/canine/modeling_canine.py +1645 -0
  27. venv/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py +241 -0
  28. venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/configuration_cvt.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_cvt.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__init__.py +144 -0
  32. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/configuration_layoutlmv3.py +293 -0
  42. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py +35 -0
  43. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/image_processing_layoutlmv3.py +387 -0
  44. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_layoutlmv3.py +1371 -0
  45. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +1778 -0
  46. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/processing_layoutlmv3.py +199 -0
  47. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3.py +1461 -0
  48. venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py +837 -0
  49. venv/lib/python3.10/site-packages/transformers/models/mgp_str/__init__.py +62 -0
  50. venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/processing_mgp_str.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/15.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:356bd565dcff7268eba1bfab4181403464b21dd33b3026ee39f496e61c530db8
+ size 50332828
ckpts/universal/global_step40/zero/15.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9f9d524a7999c2e17114f6075c41c53de5795b99e8ac2afcda3fa3b10cf1827
+ size 50332749
ckpts/universal/global_step40/zero/15.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb920f48bde1afa9dfd1393ee33438e0a49e1241b2cab66302f8d26f9e2dfd18
+ size 33555627
ckpts/universal/global_step40/zero/15.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e68740b23c565dd5449492664b90f136635c819b760559dae9c1697d921c684c
+ size 33555533
ckpts/universal/global_step40/zero/17.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46c0a05889d528bf247bf6c9e0ab1519cef0fe6b048e83c76fc30ec3457b744a
+ size 9372
ckpts/universal/global_step40/zero/17.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfa9be9b1d68dc92c2af29b2244988e885fc7b45d4c8d67570cc835e0ca6cdc7
+ size 9293
ckpts/universal/global_step40/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed184f69e6a8bbc49a2162014690700fa2cc67748416238c1b8348558a1bcddf
+ size 33555627
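The seven `.pt` entries above are Git LFS pointer files rather than the checkpoint tensors themselves: each pointer records the LFS spec version, the SHA-256 object id of the real blob, and its size in bytes, and the blob is fetched from LFS storage on checkout. A minimal sketch of reading such a pointer (not part of this commit; the helper name and the path in the comment are illustrative only):

```python
from pathlib import Path


def read_lfs_pointer(path: str) -> dict:
    """Parse a three-line Git LFS pointer into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    return fields


# Hypothetical local checkout:
# read_lfs_pointer("ckpts/universal/global_step40/zero/17.input_layernorm.weight/fp32.pt")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:bfa9be9b1d68dc92c2af29b2244988e885fc7b45d4c8d67570cc835e0ca6cdc7",
#     "size": "9293"}
```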
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py ADDED
@@ -0,0 +1,138 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_blenderbot_small": [
+         "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "BlenderbotSmallConfig",
+         "BlenderbotSmallOnnxConfig",
+     ],
+     "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
+ }
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_blenderbot_small"] = [
+         "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "BlenderbotSmallForCausalLM",
+         "BlenderbotSmallForConditionalGeneration",
+         "BlenderbotSmallModel",
+         "BlenderbotSmallPreTrainedModel",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_blenderbot_small"] = [
+         "TFBlenderbotSmallForConditionalGeneration",
+         "TFBlenderbotSmallModel",
+         "TFBlenderbotSmallPreTrainedModel",
+     ]
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_blenderbot_small"] = [
+         "FlaxBlenderbotSmallForConditionalGeneration",
+         "FlaxBlenderbotSmallModel",
+         "FlaxBlenderbotSmallPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_blenderbot_small import (
+         BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         BlenderbotSmallConfig,
+         BlenderbotSmallOnnxConfig,
+     )
+     from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_blenderbot_small import (
+             BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
+             BlenderbotSmallForCausalLM,
+             BlenderbotSmallForConditionalGeneration,
+             BlenderbotSmallModel,
+             BlenderbotSmallPreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_blenderbot_small import (
+             TFBlenderbotSmallForConditionalGeneration,
+             TFBlenderbotSmallModel,
+             TFBlenderbotSmallPreTrainedModel,
+         )
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_blenderbot_small import (
+             FlaxBlenderbotSmallForConditionalGeneration,
+             FlaxBlenderbotSmallModel,
+             FlaxBlenderbotSmallPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
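The `__init__.py` added above follows the library's lazy-import pattern: importing the package only builds `_import_structure`, and `_LazyModule` defers the torch/TF/Flax submodule imports until an attribute is first accessed, while the `TYPE_CHECKING` branch lets static type checkers see the real imports. A minimal usage sketch, assuming the `transformers` package from this venv is importable:

```python
# Importing the package module is cheap: none of the framework-specific
# submodules are imported yet, only _import_structure is registered.
from transformers.models import blenderbot_small

# First attribute access goes through _LazyModule and imports
# configuration_blenderbot_small on demand.
config = blenderbot_small.BlenderbotSmallConfig()
print(config.model_type)  # "blenderbot-small"
```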
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc ADDED
Binary file (52.5 kB).
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc ADDED
Binary file (43.2 kB).
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc ADDED
Binary file (49.2 kB).
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py ADDED
@@ -0,0 +1,389 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BlenderbotSmall model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...file_utils import TensorType, is_torch_available
23
+ from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
24
+ from ...onnx.utils import compute_effective_axis_dimension
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class BlenderbotSmallConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate
36
+ a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall
38
+ [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+
44
+ Args:
45
+ vocab_size (`int`, *optional*, defaults to 50265):
46
+ Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be
47
+ represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`].
48
+ d_model (`int`, *optional*, defaults to 512):
49
+ Dimensionality of the layers and the pooler layer.
50
+ encoder_layers (`int`, *optional*, defaults to 8):
51
+ Number of encoder layers.
52
+ decoder_layers (`int`, *optional*, defaults to 8):
53
+ Number of decoder layers.
54
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
57
+ Number of attention heads for each attention layer in the Transformer decoder.
58
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
59
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
60
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
61
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
62
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
63
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
64
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
65
+ dropout (`float`, *optional*, defaults to 0.1):
66
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
67
+ attention_dropout (`float`, *optional*, defaults to 0.0):
68
+ The dropout ratio for the attention probabilities.
69
+ activation_dropout (`float`, *optional*, defaults to 0.0):
70
+ The dropout ratio for activations inside the fully connected layer.
71
+ max_position_embeddings (`int`, *optional*, defaults to 512):
72
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
73
+ just in case (e.g., 512 or 1024 or 2048).
74
+ init_std (`float`, *optional*, defaults to 0.02):
75
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
76
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
77
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
78
+ for more details.
79
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
80
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
81
+ for more details.
82
+ scale_embedding (`bool`, *optional*, defaults to `False`):
83
+ Scale embeddings by dividing by sqrt(d_model).
84
+ use_cache (`bool`, *optional*, defaults to `True`):
85
+ Whether or not the model should return the last key/values attentions (not used by all models)
86
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
87
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
88
+ `eos_token_id`.
89
+
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel
94
+
95
+ >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration
96
+ >>> configuration = BlenderbotSmallConfig()
97
+
98
+ >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration
99
+ >>> model = BlenderbotSmallModel(configuration)
100
+
101
+ >>> # Accessing the model configuration
102
+ >>> configuration = model.config
103
+ ```"""
104
+
105
+ model_type = "blenderbot-small"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
108
+
109
+ def __init__(
110
+ self,
111
+ vocab_size=50265,
112
+ max_position_embeddings=512,
113
+ encoder_layers=8,
114
+ encoder_ffn_dim=2048,
115
+ encoder_attention_heads=16,
116
+ decoder_layers=8,
117
+ decoder_ffn_dim=2048,
118
+ decoder_attention_heads=16,
119
+ encoder_layerdrop=0.0,
120
+ decoder_layerdrop=0.0,
121
+ use_cache=True,
122
+ is_encoder_decoder=True,
123
+ activation_function="gelu",
124
+ d_model=512,
125
+ dropout=0.1,
126
+ attention_dropout=0.0,
127
+ activation_dropout=0.0,
128
+ init_std=0.02,
129
+ decoder_start_token_id=1,
130
+ scale_embedding=False,
131
+ pad_token_id=0,
132
+ bos_token_id=1,
133
+ eos_token_id=2,
134
+ forced_eos_token_id=2,
135
+ **kwargs,
136
+ ):
137
+ self.vocab_size = vocab_size
138
+ self.max_position_embeddings = max_position_embeddings
139
+ self.d_model = d_model
140
+ self.encoder_ffn_dim = encoder_ffn_dim
141
+ self.encoder_layers = encoder_layers
142
+ self.encoder_attention_heads = encoder_attention_heads
143
+ self.decoder_ffn_dim = decoder_ffn_dim
144
+ self.decoder_layers = decoder_layers
145
+ self.decoder_attention_heads = decoder_attention_heads
146
+ self.dropout = dropout
147
+ self.attention_dropout = attention_dropout
148
+ self.activation_dropout = activation_dropout
149
+ self.activation_function = activation_function
150
+ self.init_std = init_std
151
+ self.encoder_layerdrop = encoder_layerdrop
152
+ self.decoder_layerdrop = decoder_layerdrop
153
+ self.use_cache = use_cache
154
+ self.num_hidden_layers = encoder_layers
155
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
156
+
157
+ super().__init__(
158
+ pad_token_id=pad_token_id,
159
+ bos_token_id=bos_token_id,
160
+ eos_token_id=eos_token_id,
161
+ is_encoder_decoder=is_encoder_decoder,
162
+ decoder_start_token_id=decoder_start_token_id,
163
+ forced_eos_token_id=forced_eos_token_id,
164
+ **kwargs,
165
+ )
166
+
167
+
168
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig
169
+ class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
170
+ @property
171
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
172
+ if self.task in ["default", "seq2seq-lm"]:
173
+ common_inputs = OrderedDict(
174
+ [
175
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
176
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
177
+ ]
178
+ )
179
+
180
+ if self.use_past:
181
+ common_inputs["decoder_input_ids"] = {0: "batch"}
182
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
183
+ else:
184
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
185
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
186
+
187
+ if self.use_past:
188
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
189
+ elif self.task == "causal-lm":
190
+ # TODO: figure this case out.
191
+ common_inputs = OrderedDict(
192
+ [
193
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
194
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
195
+ ]
196
+ )
197
+ if self.use_past:
198
+ num_encoder_layers, _ = self.num_layers
199
+ for i in range(num_encoder_layers):
200
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
201
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
202
+ else:
203
+ common_inputs = OrderedDict(
204
+ [
205
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
206
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
207
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
208
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
209
+ ]
210
+ )
211
+
212
+ return common_inputs
213
+
214
+ @property
215
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
216
+ if self.task in ["default", "seq2seq-lm"]:
217
+ common_outputs = super().outputs
218
+ else:
219
+ common_outputs = super(OnnxConfigWithPast, self).outputs
220
+ if self.use_past:
221
+ num_encoder_layers, _ = self.num_layers
222
+ for i in range(num_encoder_layers):
223
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
224
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
225
+ return common_outputs
226
+
227
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
228
+ self,
229
+ tokenizer: PreTrainedTokenizer,
230
+ batch_size: int = -1,
231
+ seq_length: int = -1,
232
+ is_pair: bool = False,
233
+ framework: Optional[TensorType] = None,
234
+ ) -> Mapping[str, Any]:
235
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
236
+ tokenizer, batch_size, seq_length, is_pair, framework
237
+ )
238
+
239
+ # Generate decoder inputs
240
+ decoder_seq_length = seq_length if not self.use_past else 1
241
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
242
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
243
+ )
244
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
245
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
246
+
247
+ if self.use_past:
248
+ if not is_torch_available():
249
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
250
+ else:
251
+ import torch
252
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
253
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
254
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
255
+ encoder_shape = (
256
+ batch,
257
+ num_encoder_attention_heads,
258
+ encoder_seq_length,
259
+ self._config.hidden_size // num_encoder_attention_heads,
260
+ )
261
+ decoder_past_length = decoder_seq_length + 3
262
+ decoder_shape = (
263
+ batch,
264
+ num_decoder_attention_heads,
265
+ decoder_past_length,
266
+ self._config.hidden_size // num_decoder_attention_heads,
267
+ )
268
+
269
+ common_inputs["decoder_attention_mask"] = torch.cat(
270
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
271
+ )
272
+
273
+ common_inputs["past_key_values"] = []
274
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
275
+ num_encoder_layers, num_decoder_layers = self.num_layers
276
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
277
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
278
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
279
+
280
+ for _ in range(min_num_layers):
281
+ common_inputs["past_key_values"].append(
282
+ (
283
+ torch.zeros(decoder_shape),
284
+ torch.zeros(decoder_shape),
285
+ torch.zeros(encoder_shape),
286
+ torch.zeros(encoder_shape),
287
+ )
288
+ )
289
+ # TODO: test this.
290
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
291
+ for _ in range(min_num_layers, max_num_layers):
292
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
293
+ return common_inputs
294
+
295
+ def _generate_dummy_inputs_for_causal_lm(
296
+ self,
297
+ tokenizer: PreTrainedTokenizer,
298
+ batch_size: int = -1,
299
+ seq_length: int = -1,
300
+ is_pair: bool = False,
301
+ framework: Optional[TensorType] = None,
302
+ ) -> Mapping[str, Any]:
303
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
304
+ tokenizer, batch_size, seq_length, is_pair, framework
305
+ )
306
+
307
+ if self.use_past:
308
+ if not is_torch_available():
309
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
310
+ else:
311
+ import torch
312
+ batch, seqlen = common_inputs["input_ids"].shape
313
+ # Not using the same length for past_key_values
314
+ past_key_values_length = seqlen + 2
315
+ num_encoder_layers, _ = self.num_layers
316
+ num_encoder_attention_heads, _ = self.num_attention_heads
317
+ past_shape = (
318
+ batch,
319
+ num_encoder_attention_heads,
320
+ past_key_values_length,
321
+ self._config.hidden_size // num_encoder_attention_heads,
322
+ )
323
+
324
+ mask_dtype = common_inputs["attention_mask"].dtype
325
+ common_inputs["attention_mask"] = torch.cat(
326
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
327
+ )
328
+ common_inputs["past_key_values"] = [
329
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
330
+ ]
331
+ return common_inputs
332
+
333
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
334
+ self,
335
+ tokenizer: PreTrainedTokenizer,
336
+ batch_size: int = -1,
337
+ seq_length: int = -1,
338
+ is_pair: bool = False,
339
+ framework: Optional[TensorType] = None,
340
+ ) -> Mapping[str, Any]:
341
+ # Copied from OnnxConfig.generate_dummy_inputs
342
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
343
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
344
+ batch_size = compute_effective_axis_dimension(
345
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
346
+ )
347
+
348
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
349
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
350
+ seq_length = compute_effective_axis_dimension(
351
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
352
+ )
353
+
354
+ # Generate dummy inputs according to compute batch and sequence
355
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
356
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
357
+ return common_inputs
358
+
359
+ def generate_dummy_inputs(
360
+ self,
361
+ tokenizer: PreTrainedTokenizer,
362
+ batch_size: int = -1,
363
+ seq_length: int = -1,
364
+ is_pair: bool = False,
365
+ framework: Optional[TensorType] = None,
366
+ ) -> Mapping[str, Any]:
367
+ if self.task in ["default", "seq2seq-lm"]:
368
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
369
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
370
+ )
371
+
372
+ elif self.task == "causal-lm":
373
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
374
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
375
+ )
376
+ else:
377
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
378
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
379
+ )
380
+
381
+ return common_inputs
382
+
383
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
384
+ if self.task in ["default", "seq2seq-lm"]:
385
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
386
+ else:
387
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
388
+ flattened_output, name, idx, t
389
+ )
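`BlenderbotSmallOnnxConfig` above (copied from the BART ONNX config) describes the model's dynamic axes and builds dummy tensors for export tracing: `generate_dummy_inputs` tokenizes a placeholder string at a fixed batch/sequence size and, when `use_past` is enabled, appends zero-filled `past_key_values` of the appropriate shapes. A rough sketch of driving it, assuming PyTorch and the `facebook/blenderbot_small-90M` tokenizer named in the docstring are available:

```python
from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.file_utils import TensorType
from transformers.models.blenderbot_small.configuration_blenderbot_small import BlenderbotSmallOnnxConfig

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="seq2seq-lm")

# Dynamic axes (-1) are replaced by small fixed batch/sequence sizes so ONNX
# does not constant-fold them away (see compute_effective_axis_dimension above).
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(sorted(dummy))  # encoder and decoder input_ids / attention_mask tensors
```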
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py ADDED
@@ -0,0 +1,1567 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BlenderbotSmall model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ Seq2SeqLMOutput,
34
+ Seq2SeqModelOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_end_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
50
+
51
+
52
+ from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
56
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
57
+ """
58
+ Shift input ids one token to the right.
59
+ """
60
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
61
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
62
+ shifted_input_ids[:, 0] = decoder_start_token_id
63
+
64
+ if pad_token_id is None:
65
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
66
+ # replace possible -100 values in labels by `pad_token_id`
67
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
68
+
69
+ return shifted_input_ids
70
+
71
+
72
+ # Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
73
+ class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
74
+ """
75
+ This module learns positional embeddings up to a fixed maximum size.
76
+ """
77
+
78
+ def __init__(self, num_embeddings: int, embedding_dim: int):
79
+ super().__init__(num_embeddings, embedding_dim)
80
+
81
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
82
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
83
+ bsz, seq_len = input_ids_shape[:2]
84
+ positions = torch.arange(
85
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
86
+ )
87
+ return super().forward(positions)
88
+
89
+
90
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
91
+ class BlenderbotSmallAttention(nn.Module):
92
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
93
+
94
+ def __init__(
95
+ self,
96
+ embed_dim: int,
97
+ num_heads: int,
98
+ dropout: float = 0.0,
99
+ is_decoder: bool = False,
100
+ bias: bool = True,
101
+ is_causal: bool = False,
102
+ config: Optional[BlenderbotSmallConfig] = None,
103
+ ):
104
+ super().__init__()
105
+ self.embed_dim = embed_dim
106
+ self.num_heads = num_heads
107
+ self.dropout = dropout
108
+ self.head_dim = embed_dim // num_heads
109
+ self.config = config
110
+
111
+ if (self.head_dim * num_heads) != self.embed_dim:
112
+ raise ValueError(
113
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
114
+ f" and `num_heads`: {num_heads})."
115
+ )
116
+ self.scaling = self.head_dim**-0.5
117
+ self.is_decoder = is_decoder
118
+ self.is_causal = is_causal
119
+
120
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
121
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
122
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
123
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
124
+
125
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
126
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
127
+
128
+ def forward(
129
+ self,
130
+ hidden_states: torch.Tensor,
131
+ key_value_states: Optional[torch.Tensor] = None,
132
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
133
+ attention_mask: Optional[torch.Tensor] = None,
134
+ layer_head_mask: Optional[torch.Tensor] = None,
135
+ output_attentions: bool = False,
136
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
137
+ """Input shape: Batch x Time x Channel"""
138
+
139
+ # if key_value_states are provided this layer is used as a cross-attention layer
140
+ # for the decoder
141
+ is_cross_attention = key_value_states is not None
142
+
143
+ bsz, tgt_len, _ = hidden_states.size()
144
+
145
+ # get query proj
146
+ query_states = self.q_proj(hidden_states) * self.scaling
147
+ # get key, value proj
148
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
149
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
150
+ # the provided `key_value_states` to support prefix tuning
151
+ if (
152
+ is_cross_attention
153
+ and past_key_value is not None
154
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
155
+ ):
156
+ # reuse k,v, cross_attentions
157
+ key_states = past_key_value[0]
158
+ value_states = past_key_value[1]
159
+ elif is_cross_attention:
160
+ # cross_attentions
161
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
162
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
163
+ elif past_key_value is not None:
164
+ # reuse k, v, self_attention
165
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
166
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
167
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
168
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
169
+ else:
170
+ # self_attention
171
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
172
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
173
+
174
+ if self.is_decoder:
175
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
176
+ # Further calls to cross_attention layer can then reuse all cross-attention
177
+ # key/value_states (first "if" case)
178
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
179
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
180
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
181
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
182
+ past_key_value = (key_states, value_states)
183
+
184
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
185
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
186
+ key_states = key_states.reshape(*proj_shape)
187
+ value_states = value_states.reshape(*proj_shape)
188
+
189
+ src_len = key_states.size(1)
190
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
191
+
192
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
193
+ raise ValueError(
194
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
195
+ f" {attn_weights.size()}"
196
+ )
197
+
198
+ if attention_mask is not None:
199
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
200
+ raise ValueError(
201
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
202
+ )
203
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
204
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
205
+
206
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
207
+
208
+ if layer_head_mask is not None:
209
+ if layer_head_mask.size() != (self.num_heads,):
210
+ raise ValueError(
211
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
212
+ f" {layer_head_mask.size()}"
213
+ )
214
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
215
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
216
+
217
+ if output_attentions:
218
+ # this operation is a bit awkward, but it's required to
219
+ # make sure that attn_weights keeps its gradient.
220
+ # In order to do so, attn_weights have to be reshaped
221
+ # twice and have to be reused in the following
222
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
223
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
224
+ else:
225
+ attn_weights_reshaped = None
226
+
227
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
228
+
229
+ attn_output = torch.bmm(attn_probs, value_states)
230
+
231
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
232
+ raise ValueError(
233
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
234
+ f" {attn_output.size()}"
235
+ )
236
+
237
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
238
+ attn_output = attn_output.transpose(1, 2)
239
+
240
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
241
+ # partitioned across GPUs when using tensor-parallelism.
242
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
243
+
244
+ attn_output = self.out_proj(attn_output)
245
+
246
+ return attn_output, attn_weights_reshaped, past_key_value
247
+
248
+
249
+ # Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
250
+ class BlenderbotSmallEncoderLayer(nn.Module):
251
+ def __init__(self, config: BlenderbotSmallConfig):
252
+ super().__init__()
253
+ self.embed_dim = config.d_model
254
+
255
+ self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
256
+ embed_dim=self.embed_dim,
257
+ num_heads=config.encoder_attention_heads,
258
+ dropout=config.attention_dropout,
259
+ config=config,
260
+ )
261
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
262
+ self.dropout = config.dropout
263
+ self.activation_fn = ACT2FN[config.activation_function]
264
+ self.activation_dropout = config.activation_dropout
265
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
266
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
267
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
268
+
269
+ def forward(
270
+ self,
271
+ hidden_states: torch.FloatTensor,
272
+ attention_mask: torch.FloatTensor,
273
+ layer_head_mask: torch.FloatTensor,
274
+ output_attentions: Optional[bool] = False,
275
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
276
+ """
277
+ Args:
278
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
279
+ attention_mask (`torch.FloatTensor`): attention mask of size
280
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
281
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
282
+ `(encoder_attention_heads,)`.
283
+ output_attentions (`bool`, *optional*):
284
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
285
+ returned tensors for more detail.
286
+ """
287
+ residual = hidden_states
288
+ hidden_states, attn_weights, _ = self.self_attn(
289
+ hidden_states=hidden_states,
290
+ attention_mask=attention_mask,
291
+ layer_head_mask=layer_head_mask,
292
+ output_attentions=output_attentions,
293
+ )
294
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
295
+ hidden_states = residual + hidden_states
296
+ hidden_states = self.self_attn_layer_norm(hidden_states)
297
+
298
+ residual = hidden_states
299
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
300
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
301
+ hidden_states = self.fc2(hidden_states)
302
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
303
+ hidden_states = residual + hidden_states
304
+ hidden_states = self.final_layer_norm(hidden_states)
305
+
306
+ if hidden_states.dtype == torch.float16 and (
307
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
308
+ ):
309
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
310
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
311
+
312
+ outputs = (hidden_states,)
313
+
314
+ if output_attentions:
315
+ outputs += (attn_weights,)
316
+
317
+ return outputs
318
+
319
+
320
+ # TODO: Implement attention with SDPA for TimeSeriesTransformer.
321
+ BLENDERBOT_SMALL_ATTENTION_CLASSES = {
322
+ "eager": BlenderbotSmallAttention,
323
+ }
324
+
325
+
326
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
327
+ class BlenderbotSmallDecoderLayer(nn.Module):
328
+ def __init__(self, config: BlenderbotSmallConfig):
329
+ super().__init__()
330
+ self.embed_dim = config.d_model
331
+
332
+ self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
333
+ embed_dim=self.embed_dim,
334
+ num_heads=config.decoder_attention_heads,
335
+ dropout=config.attention_dropout,
336
+ is_decoder=True,
337
+ is_causal=True,
338
+ config=config,
339
+ )
340
+ self.dropout = config.dropout
341
+ self.activation_fn = ACT2FN[config.activation_function]
342
+ self.activation_dropout = config.activation_dropout
343
+
344
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
345
+ self.encoder_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
346
+ self.embed_dim,
347
+ config.decoder_attention_heads,
348
+ dropout=config.attention_dropout,
349
+ is_decoder=True,
350
+ config=config,
351
+ )
352
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
353
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
354
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
355
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
356
+
357
+ def forward(
358
+ self,
359
+ hidden_states: torch.Tensor,
360
+ attention_mask: Optional[torch.Tensor] = None,
361
+ encoder_hidden_states: Optional[torch.Tensor] = None,
362
+ encoder_attention_mask: Optional[torch.Tensor] = None,
363
+ layer_head_mask: Optional[torch.Tensor] = None,
364
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
365
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
366
+ output_attentions: Optional[bool] = False,
367
+ use_cache: Optional[bool] = True,
368
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
369
+ """
370
+ Args:
371
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
372
+ attention_mask (`torch.FloatTensor`): attention mask of size
373
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
374
+ encoder_hidden_states (`torch.FloatTensor`):
375
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
376
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
377
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
378
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
379
+ `(encoder_attention_heads,)`.
380
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
381
+ size `(decoder_attention_heads,)`.
382
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
383
+ output_attentions (`bool`, *optional*):
384
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
385
+ returned tensors for more detail.
386
+ """
387
+ residual = hidden_states
388
+
389
+ # Self Attention
390
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
391
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
392
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
393
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
394
+ hidden_states=hidden_states,
395
+ past_key_value=self_attn_past_key_value,
396
+ attention_mask=attention_mask,
397
+ layer_head_mask=layer_head_mask,
398
+ output_attentions=output_attentions,
399
+ )
400
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
401
+ hidden_states = residual + hidden_states
402
+ hidden_states = self.self_attn_layer_norm(hidden_states)
403
+
404
+ # Cross-Attention Block
405
+ cross_attn_present_key_value = None
406
+ cross_attn_weights = None
407
+ if encoder_hidden_states is not None:
408
+ residual = hidden_states
409
+
410
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
411
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
412
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
413
+ hidden_states=hidden_states,
414
+ key_value_states=encoder_hidden_states,
415
+ attention_mask=encoder_attention_mask,
416
+ layer_head_mask=cross_attn_layer_head_mask,
417
+ past_key_value=cross_attn_past_key_value,
418
+ output_attentions=output_attentions,
419
+ )
420
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
421
+ hidden_states = residual + hidden_states
422
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
423
+
424
+ # add cross-attn to positions 3,4 of present_key_value tuple
425
+ present_key_value = present_key_value + cross_attn_present_key_value
426
+
427
+ # Fully Connected
428
+ residual = hidden_states
429
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
430
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
431
+ hidden_states = self.fc2(hidden_states)
432
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
433
+ hidden_states = residual + hidden_states
434
+ hidden_states = self.final_layer_norm(hidden_states)
435
+
436
+ outputs = (hidden_states,)
437
+
438
+ if output_attentions:
439
+ outputs += (self_attn_weights, cross_attn_weights)
440
+
441
+ if use_cache:
442
+ outputs += (present_key_value,)
443
+
444
+ return outputs
445
+
446
+
447
+ class BlenderbotSmallPreTrainedModel(PreTrainedModel):
448
+ config_class = BlenderbotSmallConfig
449
+ base_model_prefix = "model"
450
+ supports_gradient_checkpointing = True
451
+
452
+ def _init_weights(self, module):
453
+ std = self.config.init_std
454
+ if isinstance(module, nn.Linear):
455
+ module.weight.data.normal_(mean=0.0, std=std)
456
+ if module.bias is not None:
457
+ module.bias.data.zero_()
458
+ elif isinstance(module, nn.Embedding):
459
+ module.weight.data.normal_(mean=0.0, std=std)
460
+ if module.padding_idx is not None:
461
+ module.weight.data[module.padding_idx].zero_()
462
+
463
+ @property
464
+ def dummy_inputs(self):
465
+ pad_token = self.config.pad_token_id
466
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
467
+ dummy_inputs = {
468
+ "attention_mask": input_ids.ne(pad_token),
469
+ "input_ids": input_ids,
470
+ "decoder_input_ids": input_ids,
471
+ }
472
+ return dummy_inputs
473
+
474
+
475
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
476
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
477
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
478
+ etc.)
479
+
480
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
481
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
482
+ and behavior.
483
+
484
+ Parameters:
485
+ config ([`BlenderbotSmallConfig`]):
486
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
487
+ load the weights associated with the model, only the configuration. Check out the
488
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
489
+ """
490
+
491
+ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
492
+ Conversation example:
493
+
494
+ ```python
495
+ >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
496
+
497
+ >>> mname = "facebook/blenderbot_small-90M"
498
+ >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
499
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
500
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
501
+ >>> print("Human: ", UTTERANCE)
502
+ Human: My friends are cool but they eat too many carbs.
503
+
504
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
505
+ >>> reply_ids = model.generate(**inputs)
506
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
507
+ Bot: what kind of carbs do they eat? i don't know much about carbs.
508
+
509
+ >>> REPLY = "I'm not sure"
510
+ >>> print("Human: ", REPLY)
511
+ Human: I'm not sure
512
+
513
+ >>> NEXT_UTTERANCE = (
514
+ ... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? "
515
+ ... "i don't know much about carbs__end__ "
516
+ ... "__start__ I'm not sure."
517
+ ... )
518
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
519
+ >>> next_reply_ids = model.generate(**inputs)
520
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
521
+ Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats.
522
+ ```
523
+ """
524
+
525
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
526
+ Args:
527
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
528
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
529
+ it.
530
+
531
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
532
+ [`PreTrainedTokenizer.__call__`] for details.
533
+
534
+ [What are input IDs?](../glossary#input-ids)
535
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
536
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
537
+
538
+ - 1 for tokens that are **not masked**,
539
+ - 0 for tokens that are **masked**.
540
+
541
+ [What are attention masks?](../glossary#attention-mask)
542
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
543
+ Indices of decoder input sequence tokens in the vocabulary.
544
+
545
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
546
+ [`PreTrainedTokenizer.__call__`] for details.
547
+
548
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
549
+
550
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
551
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
552
+ `past_key_values`).
553
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
554
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
555
+ be used by default.
556
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
557
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
558
+
559
+ - 1 indicates the head is **not masked**,
560
+ - 0 indicates the head is **masked**.
561
+
562
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
563
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
564
+
565
+ - 1 indicates the head is **not masked**,
566
+ - 0 indicates the head is **masked**.
567
+
568
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
569
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
570
+ 1]`:
571
+
572
+ - 1 indicates the head is **not masked**,
573
+ - 0 indicates the head is **masked**.
574
+
575
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
576
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
577
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
578
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
579
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
580
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
581
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
582
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
583
+
584
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
585
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
586
+
587
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
588
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
589
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
590
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
591
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
592
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
593
+ than the model's internal embedding lookup matrix.
594
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
595
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
596
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
597
+ input (see `past_key_values`). This is useful if you want more control over how to convert
598
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
599
+
600
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
601
+ of `inputs_embeds`.
602
+ use_cache (`bool`, *optional*):
603
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
604
+ `past_key_values`).
605
+ output_attentions (`bool`, *optional*):
606
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
607
+ tensors for more detail.
608
+ output_hidden_states (`bool`, *optional*):
609
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
610
+ more detail.
611
+ return_dict (`bool`, *optional*):
612
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
613
+ """
614
+
615
+
616
+ class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel):
617
+ """
618
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
619
+ [`BlenderbotSmallEncoderLayer`].
620
+
621
+ Args:
622
+ config: BlenderbotSmallConfig
623
+ embed_tokens (nn.Embedding): input token embedding
624
+ """
625
+
626
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
627
+ super().__init__(config)
628
+
629
+ self.dropout = config.dropout
630
+ self.layerdrop = config.encoder_layerdrop
631
+
632
+ embed_dim = config.d_model
633
+ self.padding_idx = config.pad_token_id
634
+ self.max_source_positions = config.max_position_embeddings
635
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
636
+
637
+ if embed_tokens is not None:
638
+ self.embed_tokens = embed_tokens
639
+ else:
640
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
641
+
642
+ self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
643
+ config.max_position_embeddings,
644
+ embed_dim,
645
+ )
646
+ self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
647
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
648
+
649
+ self.gradient_checkpointing = False
650
+ # Initialize weights and apply final processing
651
+ self.post_init()
652
+
653
+ def forward(
654
+ self,
655
+ input_ids=None,
656
+ attention_mask=None,
657
+ head_mask=None,
658
+ inputs_embeds=None,
659
+ output_attentions=None,
660
+ output_hidden_states=None,
661
+ return_dict=None,
662
+ ):
663
+ r"""
664
+ Args:
665
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
666
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
667
+ provide it.
668
+
669
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
670
+ [`PreTrainedTokenizer.__call__`] for details.
671
+
672
+ [What are input IDs?](../glossary#input-ids)
673
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
674
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
675
+
676
+ - 1 for tokens that are **not masked**,
677
+ - 0 for tokens that are **masked**.
678
+
679
+ [What are attention masks?](../glossary#attention-mask)
680
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
681
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
682
+
683
+ - 1 indicates the head is **not masked**,
684
+ - 0 indicates the head is **masked**.
685
+
686
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
687
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
688
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
689
+ than the model's internal embedding lookup matrix.
690
+ output_attentions (`bool`, *optional*):
691
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
692
+ returned tensors for more detail.
693
+ output_hidden_states (`bool`, *optional*):
694
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
695
+ for more detail.
696
+ return_dict (`bool`, *optional*):
697
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
698
+ """
699
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
700
+ output_hidden_states = (
701
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
702
+ )
703
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
704
+
705
+ # retrieve input_ids and inputs_embeds
706
+ if input_ids is not None and inputs_embeds is not None:
707
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
708
+ elif input_ids is not None:
709
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
710
+ input_shape = input_ids.size()
711
+ input_ids = input_ids.view(-1, input_shape[-1])
712
+ elif inputs_embeds is not None:
713
+ input_shape = inputs_embeds.size()[:-1]
714
+ else:
715
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
716
+
717
+ if inputs_embeds is None:
718
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
719
+
720
+ embed_pos = self.embed_positions(input_shape)
721
+
722
+ hidden_states = inputs_embeds + embed_pos
723
+ hidden_states = self.layernorm_embedding(hidden_states)
724
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
725
+
726
+ # expand attention_mask
727
+ if attention_mask is not None:
728
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
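+ # the expanded mask is additive: 0.0 where attention is allowed and the dtype's most negative value at padded positions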
729
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
730
+
731
+ encoder_states = () if output_hidden_states else None
732
+ all_attentions = () if output_attentions else None
733
+
734
+ # check if head_mask has a correct number of layers specified if desired
735
+ if head_mask is not None:
736
+ if head_mask.size()[0] != len(self.layers):
737
+ raise ValueError(
738
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
739
+ f" {head_mask.size()[0]}."
740
+ )
741
+ for idx, encoder_layer in enumerate(self.layers):
742
+ if output_hidden_states:
743
+ encoder_states = encoder_states + (hidden_states,)
744
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
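+ # during training each encoder layer is skipped with probability self.layerdrop; at inference every layer is run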
745
+ to_drop = False
746
+ if self.training:
747
+ dropout_probability = torch.rand([])
748
+ if dropout_probability < self.layerdrop: # skip the layer
749
+ to_drop = True
750
+
751
+ if to_drop:
752
+ layer_outputs = (None, None)
753
+ else:
754
+ if self.gradient_checkpointing and self.training:
755
+ layer_outputs = self._gradient_checkpointing_func(
756
+ encoder_layer.__call__,
757
+ hidden_states,
758
+ attention_mask,
759
+ (head_mask[idx] if head_mask is not None else None),
760
+ output_attentions,
761
+ )
762
+ else:
763
+ layer_outputs = encoder_layer(
764
+ hidden_states,
765
+ attention_mask,
766
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
767
+ output_attentions=output_attentions,
768
+ )
769
+
770
+ hidden_states = layer_outputs[0]
771
+
772
+ if output_attentions:
773
+ all_attentions = all_attentions + (layer_outputs[1],)
774
+
775
+ if output_hidden_states:
776
+ encoder_states = encoder_states + (hidden_states,)
777
+
778
+ if not return_dict:
779
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
780
+ return BaseModelOutput(
781
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
782
+ )
783
+
784
+
785
+ class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
786
+ """
787
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`]
788
+
789
+ Args:
790
+ config: BlenderbotSmallConfig
791
+ embed_tokens (nn.Embedding): input token embedding
792
+ """
793
+
794
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
795
+ super().__init__(config)
796
+ self.dropout = config.dropout
797
+ self.layerdrop = config.decoder_layerdrop
798
+ self.padding_idx = config.pad_token_id
799
+ self.max_target_positions = config.max_position_embeddings
800
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
801
+
802
+ if embed_tokens is not None:
803
+ self.embed_tokens = embed_tokens
804
+ else:
805
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
806
+
807
+ self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
808
+ config.max_position_embeddings,
809
+ config.d_model,
810
+ )
811
+ self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)])
812
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
813
+
814
+ self.gradient_checkpointing = False
815
+ # Initialize weights and apply final processing
816
+ self.post_init()
817
+
818
+ def get_input_embeddings(self):
819
+ return self.embed_tokens
820
+
821
+ def set_input_embeddings(self, value):
822
+ self.embed_tokens = value
823
+
824
+ def forward(
825
+ self,
826
+ input_ids=None,
827
+ attention_mask=None,
828
+ encoder_hidden_states=None,
829
+ encoder_attention_mask=None,
830
+ head_mask=None,
831
+ cross_attn_head_mask=None,
832
+ past_key_values=None,
833
+ inputs_embeds=None,
834
+ use_cache=None,
835
+ output_attentions=None,
836
+ output_hidden_states=None,
837
+ return_dict=None,
838
+ ):
839
+ r"""
840
+ Args:
841
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
842
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
843
+ provide it.
844
+
845
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
846
+ [`PreTrainedTokenizer.__call__`] for details.
847
+
848
+ [What are input IDs?](../glossary#input-ids)
849
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
850
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
851
+
852
+ - 1 for tokens that are **not masked**,
853
+ - 0 for tokens that are **masked**.
854
+
855
+ [What are attention masks?](../glossary#attention-mask)
856
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
857
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
858
+ of the decoder.
859
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
860
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
861
+ selected in `[0, 1]`:
862
+
863
+ - 1 for tokens that are **not masked**,
864
+ - 0 for tokens that are **masked**.
865
+
866
+ [What are attention masks?](../glossary#attention-mask)
867
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
868
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
869
+
870
+ - 1 indicates the head is **not masked**,
871
+ - 0 indicates the head is **masked**.
872
+
873
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
874
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
875
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
876
+
877
+ - 1 indicates the head is **not masked**,
878
+ - 0 indicates the head is **masked**.
879
+
880
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
881
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
882
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
883
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
884
+
885
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
886
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
887
+
888
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
889
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
890
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
891
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
892
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
893
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
894
+ than the model's internal embedding lookup matrix.
895
+ output_attentions (`bool`, *optional*):
896
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
897
+ returned tensors for more detail.
898
+ output_hidden_states (`bool`, *optional*):
899
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
900
+ for more detail.
901
+ return_dict (`bool`, *optional*):
902
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
903
+ """
904
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
905
+ output_hidden_states = (
906
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
907
+ )
908
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
909
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
910
+
911
+ # retrieve input_ids and inputs_embeds
912
+ if input_ids is not None and inputs_embeds is not None:
913
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
914
+ elif input_ids is not None:
915
+ input_shape = input_ids.size()
916
+ input_ids = input_ids.view(-1, input_shape[-1])
917
+ elif inputs_embeds is not None:
918
+ input_shape = inputs_embeds.size()[:-1]
919
+ else:
920
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
921
+
922
+ # past_key_values_length
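+ # each cached layer entry holds (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value); dim 2 of the self-attention key is the number of tokens already decoded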
923
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
924
+
925
+ if inputs_embeds is None:
926
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
927
+
928
+ attention_mask = _prepare_4d_causal_attention_mask(
929
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
930
+ )
931
+
932
+ # expand encoder attention mask
933
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
934
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
935
+ encoder_attention_mask = _prepare_4d_attention_mask(
936
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
937
+ )
938
+
939
+ # embed positions
940
+ positions = self.embed_positions(input_shape, past_key_values_length)
941
+
942
+ # BlenderbotSmall applies the embedding layer norm to the token embeddings before the position embeddings are added
943
+ inputs_embeds = self.layernorm_embedding(inputs_embeds)
944
+ hidden_states = inputs_embeds + positions
945
+
946
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
947
+
948
+ if self.gradient_checkpointing and self.training:
949
+ if use_cache:
950
+ logger.warning_once(
951
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
952
+ )
953
+ use_cache = False
954
+
955
+ # decoder layers
956
+ all_hidden_states = () if output_hidden_states else None
957
+ all_self_attns = () if output_attentions else None
958
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
959
+ next_decoder_cache = () if use_cache else None
960
+
961
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
962
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
963
+ if attn_mask is not None:
964
+ if attn_mask.size()[0] != len(self.layers):
965
+ raise ValueError(
966
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
967
+ f" {head_mask.size()[0]}."
968
+ )
969
+ for idx, decoder_layer in enumerate(self.layers):
970
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
971
+ if output_hidden_states:
972
+ all_hidden_states += (hidden_states,)
973
+ if self.training:
974
+ dropout_probability = torch.rand([])
975
+ if dropout_probability < self.layerdrop:
976
+ continue
977
+
978
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
979
+
980
+ if self.gradient_checkpointing and self.training:
981
+ layer_outputs = self._gradient_checkpointing_func(
982
+ decoder_layer.__call__,
983
+ hidden_states,
984
+ attention_mask,
985
+ encoder_hidden_states,
986
+ encoder_attention_mask,
987
+ head_mask[idx] if head_mask is not None else None,
988
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
989
+ None,
990
+ output_attentions,
991
+ use_cache,
992
+ )
993
+ else:
994
+ layer_outputs = decoder_layer(
995
+ hidden_states,
996
+ attention_mask=attention_mask,
997
+ encoder_hidden_states=encoder_hidden_states,
998
+ encoder_attention_mask=encoder_attention_mask,
999
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1000
+ cross_attn_layer_head_mask=(
1001
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1002
+ ),
1003
+ past_key_value=past_key_value,
1004
+ output_attentions=output_attentions,
1005
+ use_cache=use_cache,
1006
+ )
1007
+ hidden_states = layer_outputs[0]
1008
+
1009
+ if use_cache:
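+ # the decoder layer returns (hidden_states, self_attn, cross_attn, present_key_value) when output_attentions is set, otherwise (hidden_states, present_key_value)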
1010
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1011
+
1012
+ if output_attentions:
1013
+ all_self_attns += (layer_outputs[1],)
1014
+
1015
+ if encoder_hidden_states is not None:
1016
+ all_cross_attentions += (layer_outputs[2],)
1017
+
1018
+ # add hidden states from the last decoder layer
1019
+ if output_hidden_states:
1020
+ all_hidden_states += (hidden_states,)
1021
+
1022
+ next_cache = next_decoder_cache if use_cache else None
1023
+ if not return_dict:
1024
+ return tuple(
1025
+ v
1026
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1027
+ if v is not None
1028
+ )
1029
+ return BaseModelOutputWithPastAndCrossAttentions(
1030
+ last_hidden_state=hidden_states,
1031
+ past_key_values=next_cache,
1032
+ hidden_states=all_hidden_states,
1033
+ attentions=all_self_attns,
1034
+ cross_attentions=all_cross_attentions,
1035
+ )
1036
+
1037
+
1038
+ @add_start_docstrings(
1039
+ "The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.",
1040
+ BLENDERBOT_SMALL_START_DOCSTRING,
1041
+ )
1042
+ class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
1043
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]
1044
+
1045
+ def __init__(self, config: BlenderbotSmallConfig):
1046
+ super().__init__(config)
1047
+
1048
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1049
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1050
+
1051
+ self.encoder = BlenderbotSmallEncoder(config, self.shared)
1052
+ self.decoder = BlenderbotSmallDecoder(config, self.shared)
1053
+
1054
+ # Initialize weights and apply final processing
1055
+ self.post_init()
1056
+
1057
+ def get_input_embeddings(self):
1058
+ return self.shared
1059
+
1060
+ def set_input_embeddings(self, value):
1061
+ self.shared = value
1062
+ self.encoder.embed_tokens = self.shared
1063
+ self.decoder.embed_tokens = self.shared
1064
+
1065
+ def get_encoder(self):
1066
+ return self.encoder
1067
+
1068
+ def get_decoder(self):
1069
+ return self.decoder
1070
+
1071
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1072
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1073
+ def forward(
1074
+ self,
1075
+ input_ids: Optional[torch.LongTensor] = None,
1076
+ attention_mask: Optional[torch.Tensor] = None,
1077
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1078
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1079
+ head_mask: Optional[torch.Tensor] = None,
1080
+ decoder_head_mask: Optional[torch.Tensor] = None,
1081
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1082
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
1083
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1084
+ inputs_embeds: Optional[torch.Tensor] = None,
1085
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1086
+ use_cache: Optional[bool] = None,
1087
+ output_attentions: Optional[bool] = None,
1088
+ output_hidden_states: Optional[bool] = None,
1089
+ return_dict: Optional[bool] = None,
1090
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
1091
+ r"""
1092
+ Returns:
1093
+
1094
+ Example:
1095
+
1096
+ ```python
1097
+ >>> from transformers import AutoTokenizer, BlenderbotSmallModel
1098
+
1099
+ >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
1100
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1101
+
1102
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
1103
+ >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") # Batch size 1
1104
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
1105
+
1106
+ >>> last_hidden_states = outputs.last_hidden_state
1107
+ >>> list(last_hidden_states.shape)
1108
+ [1, 3, 512]
1109
+ ```"""
1110
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1111
+ output_hidden_states = (
1112
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1113
+ )
1114
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1115
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1116
+
1117
+ if encoder_outputs is None:
1118
+ encoder_outputs = self.encoder(
1119
+ input_ids=input_ids,
1120
+ attention_mask=attention_mask,
1121
+ head_mask=head_mask,
1122
+ inputs_embeds=inputs_embeds,
1123
+ output_attentions=output_attentions,
1124
+ output_hidden_states=output_hidden_states,
1125
+ return_dict=return_dict,
1126
+ )
1127
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1128
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1129
+ encoder_outputs = BaseModelOutput(
1130
+ last_hidden_state=encoder_outputs[0],
1131
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1132
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1133
+ )
1134
+
1135
+ # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
1136
+ decoder_outputs = self.decoder(
1137
+ input_ids=decoder_input_ids,
1138
+ attention_mask=decoder_attention_mask,
1139
+ encoder_hidden_states=encoder_outputs[0],
1140
+ encoder_attention_mask=attention_mask,
1141
+ head_mask=decoder_head_mask,
1142
+ cross_attn_head_mask=cross_attn_head_mask,
1143
+ past_key_values=past_key_values,
1144
+ inputs_embeds=decoder_inputs_embeds,
1145
+ use_cache=use_cache,
1146
+ output_attentions=output_attentions,
1147
+ output_hidden_states=output_hidden_states,
1148
+ return_dict=return_dict,
1149
+ )
1150
+
1151
+ if not return_dict:
1152
+ return decoder_outputs + encoder_outputs
1153
+
1154
+ return Seq2SeqModelOutput(
1155
+ last_hidden_state=decoder_outputs.last_hidden_state,
1156
+ past_key_values=decoder_outputs.past_key_values,
1157
+ decoder_hidden_states=decoder_outputs.hidden_states,
1158
+ decoder_attentions=decoder_outputs.attentions,
1159
+ cross_attentions=decoder_outputs.cross_attentions,
1160
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1161
+ encoder_hidden_states=encoder_outputs.hidden_states,
1162
+ encoder_attentions=encoder_outputs.attentions,
1163
+ )
1164
+
1165
+
1166
+ @add_start_docstrings(
1167
+ "The BlenderbotSmall Model with a language modeling head. Can be used for summarization.",
1168
+ BLENDERBOT_SMALL_START_DOCSTRING,
1169
+ )
1170
+ class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
1171
+ base_model_prefix = "model"
1172
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
1173
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]
1174
+
1175
+ def __init__(self, config: BlenderbotSmallConfig):
1176
+ super().__init__(config)
1177
+ self.model = BlenderbotSmallModel(config)
1178
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
1179
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1180
+
1181
+ # Initialize weights and apply final processing
1182
+ self.post_init()
1183
+
1184
+ def get_encoder(self):
1185
+ return self.model.get_encoder()
1186
+
1187
+ def get_decoder(self):
1188
+ return self.model.get_decoder()
1189
+
1190
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1191
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1192
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1193
+ return new_embeddings
1194
+
1195
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
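+ # keep the bias buffer in sync with the resized vocabulary: truncate when shrinking, pad the new token positions with zeros when growing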
1196
+ old_num_tokens = self.final_logits_bias.shape[-1]
1197
+ if new_num_tokens <= old_num_tokens:
1198
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1199
+ else:
1200
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1201
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1202
+ self.register_buffer("final_logits_bias", new_bias)
1203
+
1204
+ def get_output_embeddings(self):
1205
+ return self.lm_head
1206
+
1207
+ def set_output_embeddings(self, new_embeddings):
1208
+ self.lm_head = new_embeddings
1209
+
1210
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1211
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1212
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
1213
+ def forward(
1214
+ self,
1215
+ input_ids: Optional[torch.LongTensor] = None,
1216
+ attention_mask: Optional[torch.Tensor] = None,
1217
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1218
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1219
+ head_mask: Optional[torch.Tensor] = None,
1220
+ decoder_head_mask: Optional[torch.Tensor] = None,
1221
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1222
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
1223
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1224
+ inputs_embeds: Optional[torch.Tensor] = None,
1225
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1226
+ labels: Optional[torch.LongTensor] = None,
1227
+ use_cache: Optional[bool] = None,
1228
+ output_attentions: Optional[bool] = None,
1229
+ output_hidden_states: Optional[bool] = None,
1230
+ return_dict: Optional[bool] = None,
1231
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
1232
+ r"""
1233
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1234
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1235
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1236
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1237
+
1238
+ Returns:
1239
+ """
1240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1241
+
1242
+ if labels is not None:
1243
+ if use_cache:
1244
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1245
+ use_cache = False
1246
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
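+ # build decoder inputs from the labels by shifting them one position to the right, e.g. with pad_token_id=0 and decoder_start_token_id=1: labels [[5, 6, 7, 2]] -> decoder_input_ids [[1, 5, 6, 7]]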
1247
+ decoder_input_ids = shift_tokens_right(
1248
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1249
+ )
1250
+
1251
+ outputs = self.model(
1252
+ input_ids,
1253
+ attention_mask=attention_mask,
1254
+ decoder_input_ids=decoder_input_ids,
1255
+ encoder_outputs=encoder_outputs,
1256
+ decoder_attention_mask=decoder_attention_mask,
1257
+ head_mask=head_mask,
1258
+ decoder_head_mask=decoder_head_mask,
1259
+ cross_attn_head_mask=cross_attn_head_mask,
1260
+ past_key_values=past_key_values,
1261
+ inputs_embeds=inputs_embeds,
1262
+ decoder_inputs_embeds=decoder_inputs_embeds,
1263
+ use_cache=use_cache,
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ )
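+ # final_logits_bias is a non-trainable (1, vocab_size) buffer (all zeros unless a loaded checkpoint provides one) that is broadcast-added to the logits at every position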
1268
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
1269
+
1270
+ masked_lm_loss = None
1271
+ if labels is not None:
1272
+ loss_fct = CrossEntropyLoss()
1273
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1274
+
1275
+ if not return_dict:
1276
+ output = (lm_logits,) + outputs[1:]
1277
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1278
+
1279
+ return Seq2SeqLMOutput(
1280
+ loss=masked_lm_loss,
1281
+ logits=lm_logits,
1282
+ past_key_values=outputs.past_key_values,
1283
+ decoder_hidden_states=outputs.decoder_hidden_states,
1284
+ decoder_attentions=outputs.decoder_attentions,
1285
+ cross_attentions=outputs.cross_attentions,
1286
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1287
+ encoder_hidden_states=outputs.encoder_hidden_states,
1288
+ encoder_attentions=outputs.encoder_attentions,
1289
+ )
1290
+
1291
+ def prepare_inputs_for_generation(
1292
+ self,
1293
+ decoder_input_ids,
1294
+ past_key_values=None,
1295
+ attention_mask=None,
1296
+ head_mask=None,
1297
+ decoder_head_mask=None,
1298
+ cross_attn_head_mask=None,
1299
+ use_cache=None,
1300
+ encoder_outputs=None,
1301
+ **kwargs,
1302
+ ):
1303
+ # cut decoder_input_ids if past is used
1304
+ if past_key_values is not None:
1305
+ past_length = past_key_values[0][0].shape[2]
1306
+
1307
+ # Some generation methods already pass only the last input ID
1308
+ if decoder_input_ids.shape[1] > past_length:
1309
+ remove_prefix_length = past_length
1310
+ else:
1311
+ # Default to old behavior: keep only final ID
1312
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1313
+
1314
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1315
+
1316
+ return {
1317
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1318
+ "encoder_outputs": encoder_outputs,
1319
+ "past_key_values": past_key_values,
1320
+ "decoder_input_ids": decoder_input_ids,
1321
+ "attention_mask": attention_mask,
1322
+ "head_mask": head_mask,
1323
+ "decoder_head_mask": decoder_head_mask,
1324
+ "cross_attn_head_mask": cross_attn_head_mask,
1325
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1326
+ }
1327
+
1328
+ @staticmethod
1329
+ def _reorder_cache(past_key_values, beam_idx):
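+ # during beam search, reorder the cached self-attention states so that they follow the beams selected at this step (beam_idx indexes the flattened batch*beam dimension)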
1330
+ reordered_past = ()
1331
+ for layer_past in past_key_values:
1332
+ # cached cross_attention states don't have to be reordered -> they are always the same
1333
+ reordered_past += (
1334
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1335
+ + layer_past[2:],
1336
+ )
1337
+ return reordered_past
1338
+
1339
+
1340
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->BlenderbotSmall
1341
+ class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel):
1342
+ """
1343
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1344
+ used in combination with the [`EncoderDecoderModel`] framework.
1345
+ """
1346
+
1347
+ def __init__(self, config):
1348
+ super().__init__(config)
1349
+ self.decoder = BlenderbotSmallDecoder(config)
1350
+
1351
+ def forward(self, *args, **kwargs):
1352
+ return self.decoder(*args, **kwargs)
1353
+
1354
+
1355
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M
1356
+ class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel):
1357
+ _tied_weights_keys = ["lm_head.weight"]
1358
+
1359
+ def __init__(self, config):
1360
+ config = copy.deepcopy(config)
1361
+ config.is_decoder = True
1362
+ config.is_encoder_decoder = False
1363
+ super().__init__(config)
1364
+ self.model = BlenderbotSmallDecoderWrapper(config)
1365
+
1366
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1367
+
1368
+ # Initialize weights and apply final processing
1369
+ self.post_init()
1370
+
1371
+ def get_input_embeddings(self):
1372
+ return self.model.decoder.embed_tokens
1373
+
1374
+ def set_input_embeddings(self, value):
1375
+ self.model.decoder.embed_tokens = value
1376
+
1377
+ def get_output_embeddings(self):
1378
+ return self.lm_head
1379
+
1380
+ def set_output_embeddings(self, new_embeddings):
1381
+ self.lm_head = new_embeddings
1382
+
1383
+ def set_decoder(self, decoder):
1384
+ self.model.decoder = decoder
1385
+
1386
+ def get_decoder(self):
1387
+ return self.model.decoder
1388
+
1389
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1390
+ def forward(
1391
+ self,
1392
+ input_ids: torch.LongTensor = None,
1393
+ attention_mask: Optional[torch.Tensor] = None,
1394
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1395
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1396
+ head_mask: Optional[torch.Tensor] = None,
1397
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1398
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1399
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1400
+ labels: Optional[torch.LongTensor] = None,
1401
+ use_cache: Optional[bool] = None,
1402
+ output_attentions: Optional[bool] = None,
1403
+ output_hidden_states: Optional[bool] = None,
1404
+ return_dict: Optional[bool] = None,
1405
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1406
+ r"""
1407
+ Args:
1408
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1409
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1410
+ provide it.
1411
+
1412
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1413
+ [`PreTrainedTokenizer.__call__`] for details.
1414
+
1415
+ [What are input IDs?](../glossary#input-ids)
1416
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1417
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1418
+
1419
+ - 1 for tokens that are **not masked**,
1420
+ - 0 for tokens that are **masked**.
1421
+
1422
+ [What are attention masks?](../glossary#attention-mask)
1423
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1424
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1425
+ if the model is configured as a decoder.
1426
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1427
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
1428
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1429
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1430
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1431
+
1432
+ - 1 indicates the head is **not masked**,
1433
+ - 0 indicates the head is **masked**.
1434
+
1435
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1436
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1437
+
1438
+ - 1 indicates the head is **not masked**,
1439
+ - 0 indicates the head is **masked**.
1440
+
1441
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1442
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1443
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
1444
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
1445
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
1446
+
1447
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1448
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1449
+
1450
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1451
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1452
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1453
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1454
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1455
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1456
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1457
+ use_cache (`bool`, *optional*):
1458
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1459
+ (see `past_key_values`).
1463
+ output_attentions (`bool`, *optional*):
1464
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1465
+ returned tensors for more detail.
1466
+ output_hidden_states (`bool`, *optional*):
1467
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1468
+ for more detail.
1469
+ return_dict (`bool`, *optional*):
1470
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1471
+
1472
+ Returns:
1473
+
1474
+ Example:
1475
+
1476
+ ```python
1477
+ >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM
1478
+
1479
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1480
+ >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False)
1481
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
1482
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1483
+ >>> outputs = model(**inputs)
1484
+
1485
+ >>> logits = outputs.logits
1486
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
1487
+ >>> list(logits.shape) == expected_shape
1488
+ True
1489
+ ```"""
1490
+
1491
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1492
+ output_hidden_states = (
1493
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1494
+ )
1495
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1496
+
1497
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1498
+ outputs = self.model.decoder(
1499
+ input_ids=input_ids,
1500
+ attention_mask=attention_mask,
1501
+ encoder_hidden_states=encoder_hidden_states,
1502
+ encoder_attention_mask=encoder_attention_mask,
1503
+ head_mask=head_mask,
1504
+ cross_attn_head_mask=cross_attn_head_mask,
1505
+ past_key_values=past_key_values,
1506
+ inputs_embeds=inputs_embeds,
1507
+ use_cache=use_cache,
1508
+ output_attentions=output_attentions,
1509
+ output_hidden_states=output_hidden_states,
1510
+ return_dict=return_dict,
1511
+ )
1512
+
1513
+ logits = self.lm_head(outputs[0])
1514
+
1515
+ loss = None
1516
+ if labels is not None:
1517
+ labels = labels.to(logits.device)
1518
+ loss_fct = CrossEntropyLoss()
1519
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
1520
+
1521
+ if not return_dict:
1522
+ output = (logits,) + outputs[1:]
1523
+ return (loss,) + output if loss is not None else output
1524
+
1525
+ return CausalLMOutputWithCrossAttentions(
1526
+ loss=loss,
1527
+ logits=logits,
1528
+ past_key_values=outputs.past_key_values,
1529
+ hidden_states=outputs.hidden_states,
1530
+ attentions=outputs.attentions,
1531
+ cross_attentions=outputs.cross_attentions,
1532
+ )
1533
+
1534
+ def prepare_inputs_for_generation(
1535
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1536
+ ):
1537
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1538
+ if attention_mask is None:
1539
+ attention_mask = input_ids.new_ones(input_ids.shape)
1540
+
1541
+ if past_key_values:
1542
+ past_length = past_key_values[0][0].shape[2]
1543
+
1544
+ # Some generation methods already pass only the last input ID
1545
+ if input_ids.shape[1] > past_length:
1546
+ remove_prefix_length = past_length
1547
+ else:
1548
+ # Default to old behavior: keep only final ID
1549
+ remove_prefix_length = input_ids.shape[1] - 1
1550
+
1551
+ input_ids = input_ids[:, remove_prefix_length:]
1552
+ # on the first generation step there is no cache yet, so the full input_ids are passed through
1553
+ return {
1554
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
1555
+ "attention_mask": attention_mask,
1556
+ "past_key_values": past_key_values,
1557
+ "use_cache": use_cache,
1558
+ }
1559
+
1560
+ @staticmethod
1561
+ def _reorder_cache(past_key_values, beam_idx):
1562
+ reordered_past = ()
1563
+ for layer_past in past_key_values:
1564
+ reordered_past += (
1565
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1566
+ )
1567
+ return reordered_past
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py ADDED
@@ -0,0 +1,1522 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax BlenderbotSmall model."""
16
+
17
+
18
+ import math
19
+ import random
20
+ from functools import partial
21
+ from typing import Callable, Optional, Tuple
22
+
23
+ import flax.linen as nn
24
+ import jax
25
+ import jax.numpy as jnp
26
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
27
+ from flax.linen import combine_masks, make_causal_mask
28
+ from flax.linen.attention import dot_product_attention_weights
29
+ from flax.traverse_util import flatten_dict, unflatten_dict
30
+ from jax import lax
31
+ from jax.random import PRNGKey
32
+
33
+ from ...modeling_flax_outputs import (
34
+ FlaxBaseModelOutput,
35
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
36
+ FlaxCausalLMOutputWithCrossAttentions,
37
+ FlaxSeq2SeqLMOutput,
38
+ FlaxSeq2SeqModelOutput,
39
+ )
40
+ from ...modeling_flax_utils import (
41
+ ACT2FN,
42
+ FlaxPreTrainedModel,
43
+ append_call_sample_docstring,
44
+ append_replace_return_docstrings,
45
+ overwrite_call_docstring,
46
+ )
47
+ from ...utils import add_start_docstrings, logging, replace_return_docstrings
48
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
54
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
55
+
56
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
57
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
58
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
59
+ etc.)
60
+
61
+ This model is also a Flax Linen
62
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
63
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
64
+
65
+ Finally, this model supports inherent JAX features such as:
66
+
67
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
68
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
69
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
70
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
71
+
72
+ Parameters:
73
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
74
+ Initializing with a config file does not load the weights associated with the model, only the
75
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
76
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
77
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
78
+ `jax.numpy.bfloat16` (on TPUs).
79
+
80
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
81
+ specified all the computation will be performed with the given `dtype`.
82
+
83
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
84
+ parameters.**
85
+
86
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
87
+ [`~FlaxPreTrainedModel.to_bf16`].
88
+ """
89
+
90
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
91
+ Args:
92
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
93
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
94
+ it.
95
+
96
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
97
+ [`PreTrainedTokenizer.__call__`] for details.
98
+
99
+ [What are input IDs?](../glossary#input-ids)
100
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
101
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
102
+
103
+ - 1 for tokens that are **not masked**,
104
+ - 0 for tokens that are **masked**.
105
+
106
+ [What are attention masks?](../glossary#attention-mask)
107
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Indices of decoder input sequence tokens in the vocabulary.
109
+
110
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
111
+ [`PreTrainedTokenizer.__call__`] for details.
112
+
113
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
114
+
115
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
116
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
117
+ for denoising pre-training following the paper.
118
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
119
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
120
+ be used by default.
121
+
122
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
123
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
124
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
125
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
126
+ config.max_position_embeddings - 1]`.
127
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
128
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
129
+ range `[0, config.max_position_embeddings - 1]`.
130
+ output_attentions (`bool`, *optional*):
131
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
132
+ tensors for more detail.
133
+ output_hidden_states (`bool`, *optional*):
134
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
135
+ more detail.
136
+ return_dict (`bool`, *optional*):
137
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
138
+ """
139
+
140
+
141
+ BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING = r"""
142
+ Args:
143
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
144
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
145
+ it.
146
+
147
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
148
+ [`PreTrainedTokenizer.__call__`] for details.
149
+
150
+ [What are input IDs?](../glossary#input-ids)
151
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
152
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
153
+
154
+ - 1 for tokens that are **not masked**,
155
+ - 0 for tokens that are **masked**.
156
+
157
+ [What are attention masks?](../glossary#attention-mask)
158
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
159
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
160
+ config.max_position_embeddings - 1]`.
161
+ output_attentions (`bool`, *optional*):
162
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
163
+ tensors for more detail.
164
+ output_hidden_states (`bool`, *optional*):
165
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
166
+ more detail.
167
+ return_dict (`bool`, *optional*):
168
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
169
+ """
170
+
171
+ BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING = r"""
172
+ Args:
173
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
174
+ Indices of decoder input sequence tokens in the vocabulary.
175
+
176
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
177
+ [`PreTrainedTokenizer.__call__`] for details.
178
+
179
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
180
+
181
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
182
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
183
+ for denoising pre-training following the paper.
184
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
185
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
186
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
187
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
188
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
189
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
190
+
191
+ - 1 for tokens that are **not masked**,
192
+ - 0 for tokens that are **masked**.
193
+
194
+ [What are attention masks?](../glossary#attention-mask)
195
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
196
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
197
+ be used by default.
198
+
199
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
200
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
201
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
202
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
203
+ range `[0, config.max_position_embeddings - 1]`.
204
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
205
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
206
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
207
+ output_attentions (`bool`, *optional*):
208
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
209
+ tensors for more detail.
210
+ output_hidden_states (`bool`, *optional*):
211
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
212
+ more detail.
213
+ return_dict (`bool`, *optional*):
214
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
215
+ """
216
+
217
+
218
+ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
219
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
220
+ """
221
+ Shift input ids one token to the right.
222
+ """
223
+ shifted_input_ids = jnp.zeros_like(input_ids)
224
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
225
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
226
+
227
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
228
+ return shifted_input_ids
229
+
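+ # Illustrative sketch (added for clarity, not part of the upstream module): the effect of
+ # `shift_tokens_right` on a toy batch, where -100 label padding is mapped back to the pad id.
+ #
+ #   import jax.numpy as jnp
+ #
+ #   toy_labels = jnp.array([[5, 6, 2], [7, -100, -100]])
+ #   shift_tokens_right(toy_labels, pad_token_id=0, decoder_start_token_id=1)
+ #   # -> [[1, 5, 6],
+ #   #     [1, 7, 0]]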
230
+
231
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->BlenderbotSmall
232
+ class FlaxBlenderbotSmallAttention(nn.Module):
233
+ config: BlenderbotSmallConfig
234
+ embed_dim: int
235
+ num_heads: int
236
+ dropout: float = 0.0
237
+ causal: bool = False
238
+ bias: bool = True
239
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
240
+
241
+ def setup(self) -> None:
242
+ self.head_dim = self.embed_dim // self.num_heads
243
+ if self.head_dim * self.num_heads != self.embed_dim:
244
+ raise ValueError(
245
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
246
+ f" and `num_heads`: {self.num_heads})."
247
+ )
248
+
249
+ dense = partial(
250
+ nn.Dense,
251
+ self.embed_dim,
252
+ use_bias=self.bias,
253
+ dtype=self.dtype,
254
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
255
+ )
256
+
257
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
258
+ self.out_proj = dense()
259
+
260
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
261
+
262
+ if self.causal:
263
+ self.causal_mask = make_causal_mask(
264
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
265
+ )
266
+
267
+ def _split_heads(self, hidden_states):
268
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
269
+
270
+ def _merge_heads(self, hidden_states):
271
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
272
+
273
+ @nn.compact
274
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
275
+ """
276
+ This function takes projected key, value states from a single input token and concatenates the states to cached
277
+ states from previous steps. This function is slightly adapted from the official Flax repository:
278
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
279
+ """
280
+ # detect if we're initializing by absence of existing cache data.
281
+ is_initialized = self.has_variable("cache", "cached_key")
282
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
283
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
284
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
285
+
286
+ if is_initialized:
287
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
288
+ # update key, value caches with our new 1d spatial slices
289
+ cur_index = cache_index.value
290
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
291
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
292
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
293
+ cached_key.value = key
294
+ cached_value.value = value
295
+ num_updated_cache_vectors = query.shape[1]
296
+ cache_index.value = cache_index.value + num_updated_cache_vectors
297
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
298
+ pad_mask = jnp.broadcast_to(
299
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
300
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
301
+ )
302
+ attention_mask = combine_masks(pad_mask, attention_mask)
303
+ return key, value, attention_mask
304
+
305
+ def __call__(
306
+ self,
307
+ hidden_states: jnp.ndarray,
308
+ key_value_states: Optional[jnp.ndarray] = None,
309
+ attention_mask: Optional[jnp.ndarray] = None,
310
+ init_cache: bool = False,
311
+ deterministic: bool = True,
312
+ ) -> Tuple[jnp.ndarray]:
313
+ """Input shape: Batch x Time x Channel"""
314
+
315
+ # if key_value_states are provided this layer is used as a cross-attention layer
316
+ # for the decoder
317
+ is_cross_attention = key_value_states is not None
318
+ batch_size = hidden_states.shape[0]
319
+
320
+ # get query proj
321
+ query_states = self.q_proj(hidden_states)
322
+ # get key, value proj
323
+ if is_cross_attention:
324
+ # cross_attentions
325
+ key_states = self.k_proj(key_value_states)
326
+ value_states = self.v_proj(key_value_states)
327
+ else:
328
+ # self_attention
329
+ key_states = self.k_proj(hidden_states)
330
+ value_states = self.v_proj(hidden_states)
331
+
332
+ query_states = self._split_heads(query_states)
333
+ key_states = self._split_heads(key_states)
334
+ value_states = self._split_heads(value_states)
335
+
336
+ # handle the cache and prepare the causal attention mask
337
+ if self.causal:
338
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
339
+ if self.has_variable("cache", "cached_key"):
340
+ mask_shift = self.variables["cache"]["cache_index"]
341
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
342
+ causal_mask = lax.dynamic_slice(
343
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
344
+ )
345
+ else:
346
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
347
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
348
+
349
+ # combine masks if needed
350
+ if attention_mask is not None and self.causal:
351
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
352
+ attention_mask = combine_masks(attention_mask, causal_mask)
353
+ elif self.causal:
354
+ attention_mask = causal_mask
355
+ elif attention_mask is not None:
356
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
357
+
358
+ # During fast autoregressive decoding, we feed one position at a time,
359
+ # and cache the keys and values step by step.
360
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
361
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
362
+ key_states, value_states, query_states, attention_mask
363
+ )
364
+
365
+ # Convert the boolean attention mask to an attention bias.
366
+ if attention_mask is not None:
367
+ # attention mask in the form of attention bias
368
+ attention_bias = lax.select(
369
+ attention_mask > 0,
370
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
371
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
372
+ )
373
+ else:
374
+ attention_bias = None
375
+
376
+ dropout_rng = None
377
+ if not deterministic and self.dropout > 0.0:
378
+ dropout_rng = self.make_rng("dropout")
379
+
380
+ attn_weights = dot_product_attention_weights(
381
+ query_states,
382
+ key_states,
383
+ bias=attention_bias,
384
+ dropout_rng=dropout_rng,
385
+ dropout_rate=self.dropout,
386
+ broadcast_dropout=True,
387
+ deterministic=deterministic,
388
+ dtype=self.dtype,
389
+ precision=None,
390
+ )
391
+
392
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
393
+ attn_output = self._merge_heads(attn_output)
394
+ attn_output = self.out_proj(attn_output)
395
+
396
+ return attn_output, attn_weights
397
+
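+ # Illustrative sketch (added for clarity, not part of the upstream module): `_split_heads` and
+ # `_merge_heads` above are plain reshapes; e.g. with embed_dim=8 and num_heads=2 (hypothetical sizes):
+ #
+ #   import jax.numpy as jnp
+ #
+ #   x = jnp.zeros((1, 5, 8))                        # (batch, seq_len, embed_dim)
+ #   split = x.reshape(x.shape[:2] + (2, 4))         # (batch, seq_len, num_heads, head_dim)
+ #   merged = split.reshape(split.shape[:2] + (8,))  # back to (batch, seq_len, embed_dim)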
398
+
399
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->BlenderbotSmall
400
+ class FlaxBlenderbotSmallEncoderLayer(nn.Module):
401
+ config: BlenderbotSmallConfig
402
+ dtype: jnp.dtype = jnp.float32
403
+
404
+ def setup(self) -> None:
405
+ self.embed_dim = self.config.d_model
406
+ self.self_attn = FlaxBlenderbotSmallAttention(
407
+ config=self.config,
408
+ embed_dim=self.embed_dim,
409
+ num_heads=self.config.encoder_attention_heads,
410
+ dropout=self.config.attention_dropout,
411
+ dtype=self.dtype,
412
+ )
413
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
414
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
415
+ self.activation_fn = ACT2FN[self.config.activation_function]
416
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
417
+ self.fc1 = nn.Dense(
418
+ self.config.encoder_ffn_dim,
419
+ dtype=self.dtype,
420
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
421
+ )
422
+ self.fc2 = nn.Dense(
423
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
424
+ )
425
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
426
+
427
+ def __call__(
428
+ self,
429
+ hidden_states: jnp.ndarray,
430
+ attention_mask: jnp.ndarray,
431
+ output_attentions: bool = True,
432
+ deterministic: bool = True,
433
+ ) -> Tuple[jnp.ndarray]:
434
+ residual = hidden_states
435
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
436
+
437
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
438
+ hidden_states = residual + hidden_states
439
+ hidden_states = self.self_attn_layer_norm(hidden_states)
440
+
441
+ residual = hidden_states
442
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
443
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
444
+ hidden_states = self.fc2(hidden_states)
445
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
446
+ hidden_states = residual + hidden_states
447
+ hidden_states = self.final_layer_norm(hidden_states)
448
+
449
+ outputs = (hidden_states,)
450
+
451
+ if output_attentions:
452
+ outputs += (attn_weights,)
453
+
454
+ return outputs
455
+
456
+
457
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->BlenderbotSmall
458
+ class FlaxBlenderbotSmallEncoderLayerCollection(nn.Module):
459
+ config: BlenderbotSmallConfig
460
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
461
+
462
+ def setup(self):
463
+ self.layers = [
464
+ FlaxBlenderbotSmallEncoderLayer(self.config, name=str(i), dtype=self.dtype)
465
+ for i in range(self.config.encoder_layers)
466
+ ]
467
+ self.layerdrop = self.config.encoder_layerdrop
468
+
469
+ def __call__(
470
+ self,
471
+ hidden_states,
472
+ attention_mask,
473
+ deterministic: bool = True,
474
+ output_attentions: bool = False,
475
+ output_hidden_states: bool = False,
476
+ return_dict: bool = True,
477
+ ):
478
+ all_attentions = () if output_attentions else None
479
+ all_hidden_states = () if output_hidden_states else None
480
+
481
+ for encoder_layer in self.layers:
482
+ if output_hidden_states:
483
+ all_hidden_states = all_hidden_states + (hidden_states,)
484
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
485
+ dropout_probability = random.uniform(0, 1)
486
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
487
+ layer_outputs = (None, None)
488
+ else:
489
+ layer_outputs = encoder_layer(
490
+ hidden_states,
491
+ attention_mask,
492
+ output_attentions,
493
+ deterministic,
494
+ )
495
+ hidden_states = layer_outputs[0]
496
+ if output_attentions:
497
+ all_attentions = all_attentions + (layer_outputs[1],)
498
+
499
+ if output_hidden_states:
500
+ all_hidden_states += (hidden_states,)
501
+
502
+ outputs = (hidden_states, all_hidden_states, all_attentions)
503
+
504
+ if not return_dict:
505
+ return tuple(v for v in outputs if v is not None)
506
+
507
+ return FlaxBaseModelOutput(
508
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
509
+ )
510
+
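+ # Illustrative sketch (added for clarity, not part of the upstream module): the LayerDrop rule
+ # used above skips a whole encoder layer during training with probability `encoder_layerdrop`;
+ # deterministic (inference) passes never skip.
+ #
+ #   import random
+ #
+ #   layerdrop = 0.1          # hypothetical config value
+ #   deterministic = False    # i.e. a training pass
+ #   skip_layer = (not deterministic) and (random.uniform(0, 1) < layerdrop)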
511
+
512
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->BlenderbotSmall
513
+ class FlaxBlenderbotSmallDecoderLayer(nn.Module):
514
+ config: BlenderbotSmallConfig
515
+ dtype: jnp.dtype = jnp.float32
516
+
517
+ def setup(self) -> None:
518
+ self.embed_dim = self.config.d_model
519
+ self.self_attn = FlaxBlenderbotSmallAttention(
520
+ config=self.config,
521
+ embed_dim=self.embed_dim,
522
+ num_heads=self.config.decoder_attention_heads,
523
+ dropout=self.config.attention_dropout,
524
+ causal=True,
525
+ dtype=self.dtype,
526
+ )
527
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
528
+ self.activation_fn = ACT2FN[self.config.activation_function]
529
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
530
+
531
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
532
+ self.encoder_attn = FlaxBlenderbotSmallAttention(
533
+ config=self.config,
534
+ embed_dim=self.embed_dim,
535
+ num_heads=self.config.decoder_attention_heads,
536
+ dropout=self.config.attention_dropout,
537
+ dtype=self.dtype,
538
+ )
539
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
540
+ self.fc1 = nn.Dense(
541
+ self.config.decoder_ffn_dim,
542
+ dtype=self.dtype,
543
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
544
+ )
545
+ self.fc2 = nn.Dense(
546
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
547
+ )
548
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
549
+
550
+ def __call__(
551
+ self,
552
+ hidden_states: jnp.ndarray,
553
+ attention_mask: jnp.ndarray,
554
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
555
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
556
+ init_cache: bool = False,
557
+ output_attentions: bool = True,
558
+ deterministic: bool = True,
559
+ ) -> Tuple[jnp.ndarray]:
560
+ residual = hidden_states
561
+
562
+ # Self Attention
563
+ hidden_states, self_attn_weights = self.self_attn(
564
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
565
+ )
566
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
567
+ hidden_states = residual + hidden_states
568
+ hidden_states = self.self_attn_layer_norm(hidden_states)
569
+
570
+ # Cross-Attention Block
571
+ cross_attn_weights = None
572
+ if encoder_hidden_states is not None:
573
+ residual = hidden_states
574
+
575
+ hidden_states, cross_attn_weights = self.encoder_attn(
576
+ hidden_states=hidden_states,
577
+ key_value_states=encoder_hidden_states,
578
+ attention_mask=encoder_attention_mask,
579
+ )
580
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
581
+ hidden_states = residual + hidden_states
582
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
583
+
584
+ # Fully Connected
585
+ residual = hidden_states
586
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
587
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
588
+ hidden_states = self.fc2(hidden_states)
589
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
590
+ hidden_states = residual + hidden_states
591
+ hidden_states = self.final_layer_norm(hidden_states)
592
+
593
+ outputs = (hidden_states,)
594
+
595
+ if output_attentions:
596
+ outputs += (self_attn_weights, cross_attn_weights)
597
+
598
+ return outputs
599
+
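+ # Illustrative sketch (added for clarity, not part of the upstream module): the decoder
+ # self-attention above sets `causal=True`, i.e. it builds a lower-triangular boolean mask.
+ #
+ #   import jax.numpy as jnp
+ #   from flax.linen import make_causal_mask
+ #
+ #   mask = make_causal_mask(jnp.ones((1, 4), dtype="bool"), dtype="bool")
+ #   # mask.shape == (1, 1, 4, 4), and mask[0, 0, i, j] is True exactly when j <= i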
600
+
601
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->BlenderbotSmall
602
+ class FlaxBlenderbotSmallDecoderLayerCollection(nn.Module):
603
+ config: BlenderbotSmallConfig
604
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
605
+
606
+ def setup(self):
607
+ self.layers = [
608
+ FlaxBlenderbotSmallDecoderLayer(self.config, name=str(i), dtype=self.dtype)
609
+ for i in range(self.config.decoder_layers)
610
+ ]
611
+ self.layerdrop = self.config.decoder_layerdrop
612
+
613
+ def __call__(
614
+ self,
615
+ hidden_states,
616
+ attention_mask,
617
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
618
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
619
+ deterministic: bool = True,
620
+ init_cache: bool = False,
621
+ output_attentions: bool = False,
622
+ output_hidden_states: bool = False,
623
+ return_dict: bool = True,
624
+ ):
625
+ # decoder layers
626
+ all_hidden_states = () if output_hidden_states else None
627
+ all_self_attns = () if output_attentions else None
628
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
629
+
630
+ for decoder_layer in self.layers:
631
+ if output_hidden_states:
632
+ all_hidden_states += (hidden_states,)
633
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
634
+ dropout_probability = random.uniform(0, 1)
635
+ if not deterministic and (dropout_probability < self.layerdrop):
636
+ layer_outputs = (None, None, None)
637
+ else:
638
+ layer_outputs = decoder_layer(
639
+ hidden_states,
640
+ attention_mask=attention_mask,
641
+ encoder_hidden_states=encoder_hidden_states,
642
+ encoder_attention_mask=encoder_attention_mask,
643
+ init_cache=init_cache,
644
+ output_attentions=output_attentions,
645
+ deterministic=deterministic,
646
+ )
647
+
648
+ hidden_states = layer_outputs[0]
649
+ if output_attentions:
650
+ all_self_attns += (layer_outputs[1],)
651
+
652
+ if encoder_hidden_states is not None:
653
+ all_cross_attentions += (layer_outputs[2],)
654
+
655
+ # add hidden states from the last decoder layer
656
+ if output_hidden_states:
657
+ all_hidden_states += (hidden_states,)
658
+
659
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
660
+
661
+ if not return_dict:
662
+ return tuple(v for v in outputs if v is not None)
663
+
664
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
665
+ last_hidden_state=hidden_states,
666
+ hidden_states=all_hidden_states,
667
+ attentions=all_self_attns,
668
+ cross_attentions=all_cross_attentions,
669
+ )
670
+
671
+
672
+ class FlaxBlenderbotSmallEncoder(nn.Module):
673
+ config: BlenderbotSmallConfig
674
+ embed_tokens: nn.Embed
675
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
676
+
677
+ def setup(self):
678
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
679
+
680
+ embed_dim = self.config.d_model
681
+ self.padding_idx = self.config.pad_token_id
682
+ self.max_source_positions = self.config.max_position_embeddings
683
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
684
+
685
+ self.embed_positions = nn.Embed(
686
+ self.config.max_position_embeddings,
687
+ embed_dim,
688
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
689
+ )
690
+ self.layers = FlaxBlenderbotSmallEncoderLayerCollection(self.config, self.dtype)
691
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
692
+
693
+ def __call__(
694
+ self,
695
+ input_ids,
696
+ attention_mask,
697
+ position_ids,
698
+ output_attentions: bool = False,
699
+ output_hidden_states: bool = False,
700
+ return_dict: bool = True,
701
+ deterministic: bool = True,
702
+ ):
703
+ input_shape = input_ids.shape
704
+ input_ids = input_ids.reshape(-1, input_shape[-1])
705
+
706
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
707
+
708
+ embed_pos = self.embed_positions(position_ids)
709
+
710
+ hidden_states = inputs_embeds + embed_pos
711
+ hidden_states = self.layernorm_embedding(hidden_states)
712
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
713
+
714
+ outputs = self.layers(
715
+ hidden_states,
716
+ attention_mask,
717
+ deterministic=deterministic,
718
+ output_attentions=output_attentions,
719
+ output_hidden_states=output_hidden_states,
720
+ return_dict=return_dict,
721
+ )
722
+
723
+ if not return_dict:
724
+ return outputs
725
+
726
+ return FlaxBaseModelOutput(
727
+ last_hidden_state=outputs.last_hidden_state,
728
+ hidden_states=outputs.hidden_states,
729
+ attentions=outputs.attentions,
730
+ )
731
+
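+ # Illustrative sketch (added for clarity, not part of the upstream module): the learned position
+ # embeddings above are indexed with broadcast `arange` position ids, e.g. for batch_size=2 and
+ # seq_len=4 (hypothetical sizes):
+ #
+ #   import jax.numpy as jnp
+ #
+ #   position_ids = jnp.broadcast_to(jnp.arange(4)[None, :], (2, 4))
+ #   # [[0, 1, 2, 3],
+ #   #  [0, 1, 2, 3]]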
732
+
733
+ class FlaxBlenderbotSmallDecoder(nn.Module):
734
+ config: BlenderbotSmallConfig
735
+ embed_tokens: nn.Embed
736
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
737
+
738
+ def setup(self):
739
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
740
+
741
+ embed_dim = self.config.d_model
742
+ self.padding_idx = self.config.pad_token_id
743
+ self.max_target_positions = self.config.max_position_embeddings
744
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
745
+
746
+ self.embed_positions = nn.Embed(
747
+ self.config.max_position_embeddings,
748
+ embed_dim,
749
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
750
+ )
751
+
752
+ self.layers = FlaxBlenderbotSmallDecoderLayerCollection(self.config, self.dtype)
753
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
754
+
755
+ def __call__(
756
+ self,
757
+ input_ids,
758
+ attention_mask,
759
+ position_ids,
760
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
761
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
762
+ init_cache: bool = False,
763
+ output_attentions: bool = False,
764
+ output_hidden_states: bool = False,
765
+ return_dict: bool = True,
766
+ deterministic: bool = True,
767
+ ):
768
+ input_shape = input_ids.shape
769
+ input_ids = input_ids.reshape(-1, input_shape[-1])
770
+
771
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
772
+
773
+ # embed positions
774
+ positions = self.embed_positions(position_ids)
775
+
776
+ # BlenderbotSmall applies layer norm on inputs_embeds in decoder
777
+ inputs_embeds = self.layernorm_embedding(inputs_embeds)
778
+ hidden_states = inputs_embeds + positions
779
+
780
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
781
+
782
+ outputs = self.layers(
783
+ hidden_states,
784
+ attention_mask,
785
+ encoder_hidden_states,
786
+ encoder_attention_mask,
787
+ deterministic=deterministic,
788
+ init_cache=init_cache,
789
+ output_attentions=output_attentions,
790
+ output_hidden_states=output_hidden_states,
791
+ return_dict=return_dict,
792
+ )
793
+
794
+ if not return_dict:
795
+ return outputs
796
+
797
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
798
+ last_hidden_state=outputs.last_hidden_state,
799
+ hidden_states=outputs.hidden_states,
800
+ attentions=outputs.attentions,
801
+ cross_attentions=outputs.cross_attentions,
802
+ )
803
+
804
+
805
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->BlenderbotSmall
806
+ class FlaxBlenderbotSmallModule(nn.Module):
807
+ config: BlenderbotSmallConfig
808
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
809
+
810
+ def setup(self):
811
+ self.shared = nn.Embed(
812
+ self.config.vocab_size,
813
+ self.config.d_model,
814
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
815
+ dtype=self.dtype,
816
+ )
817
+
818
+ self.encoder = FlaxBlenderbotSmallEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
819
+ self.decoder = FlaxBlenderbotSmallDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
820
+
821
+ def _get_encoder_module(self):
822
+ return self.encoder
823
+
824
+ def _get_decoder_module(self):
825
+ return self.decoder
826
+
827
+ def __call__(
828
+ self,
829
+ input_ids,
830
+ attention_mask,
831
+ decoder_input_ids,
832
+ decoder_attention_mask,
833
+ position_ids,
834
+ decoder_position_ids,
835
+ output_attentions: bool = False,
836
+ output_hidden_states: bool = False,
837
+ return_dict: bool = True,
838
+ deterministic: bool = True,
839
+ ):
840
+ encoder_outputs = self.encoder(
841
+ input_ids=input_ids,
842
+ attention_mask=attention_mask,
843
+ position_ids=position_ids,
844
+ output_attentions=output_attentions,
845
+ output_hidden_states=output_hidden_states,
846
+ return_dict=return_dict,
847
+ deterministic=deterministic,
848
+ )
849
+
850
+ decoder_outputs = self.decoder(
851
+ input_ids=decoder_input_ids,
852
+ attention_mask=decoder_attention_mask,
853
+ position_ids=decoder_position_ids,
854
+ encoder_hidden_states=encoder_outputs[0],
855
+ encoder_attention_mask=attention_mask,
856
+ output_attentions=output_attentions,
857
+ output_hidden_states=output_hidden_states,
858
+ return_dict=return_dict,
859
+ deterministic=deterministic,
860
+ )
861
+
862
+ if not return_dict:
863
+ return decoder_outputs + encoder_outputs
864
+
865
+ return FlaxSeq2SeqModelOutput(
866
+ last_hidden_state=decoder_outputs.last_hidden_state,
867
+ decoder_hidden_states=decoder_outputs.hidden_states,
868
+ decoder_attentions=decoder_outputs.attentions,
869
+ cross_attentions=decoder_outputs.cross_attentions,
870
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
871
+ encoder_hidden_states=encoder_outputs.hidden_states,
872
+ encoder_attentions=encoder_outputs.attentions,
873
+ )
874
+
875
+
876
+ class FlaxBlenderbotSmallPreTrainedModel(FlaxPreTrainedModel):
877
+ config_class = BlenderbotSmallConfig
878
+ base_model_prefix: str = "model"
879
+ module_class: nn.Module = None
880
+
881
+ def __init__(
882
+ self,
883
+ config: BlenderbotSmallConfig,
884
+ input_shape: Tuple[int] = (1, 1),
885
+ seed: int = 0,
886
+ dtype: jnp.dtype = jnp.float32,
887
+ _do_init: bool = True,
888
+ **kwargs,
889
+ ):
890
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
891
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
892
+
893
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
894
+ # init input tensors
895
+ input_ids = jnp.zeros(input_shape, dtype="i4")
896
+ # make sure initialization pass will work for FlaxBlenderbotSmallForSequenceClassificationModule
897
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
898
+ attention_mask = jnp.ones_like(input_ids)
899
+ decoder_input_ids = input_ids
900
+ decoder_attention_mask = jnp.ones_like(input_ids)
901
+
902
+ batch_size, sequence_length = input_ids.shape
903
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
904
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
905
+
906
+ params_rng, dropout_rng = jax.random.split(rng)
907
+ rngs = {"params": params_rng, "dropout": dropout_rng}
908
+
909
+ random_params = self.module.init(
910
+ rngs,
911
+ input_ids,
912
+ attention_mask,
913
+ decoder_input_ids,
914
+ decoder_attention_mask,
915
+ position_ids,
916
+ decoder_position_ids,
917
+ )["params"]
918
+
919
+ if params is not None:
920
+ random_params = flatten_dict(unfreeze(random_params))
921
+ params = flatten_dict(unfreeze(params))
922
+ for missing_key in self._missing_keys:
923
+ params[missing_key] = random_params[missing_key]
924
+ self._missing_keys = set()
925
+ return freeze(unflatten_dict(params))
926
+ else:
927
+ return random_params
928
+
929
+ def init_cache(self, batch_size, max_length, encoder_outputs):
930
+ r"""
931
+ Args:
932
+ batch_size (`int`):
933
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
934
+ max_length (`int`):
935
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
936
+ cache.
937
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
938
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
939
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*,
940
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
941
+ cross-attention of the decoder.
942
+ """
943
+ # init input variables to retrieve cache
944
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
945
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
946
+ decoder_position_ids = jnp.broadcast_to(
947
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
948
+ )
949
+
950
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
951
+ decoder_module = module._get_decoder_module()
952
+ return decoder_module(
953
+ decoder_input_ids,
954
+ decoder_attention_mask,
955
+ decoder_position_ids,
956
+ **kwargs,
957
+ )
958
+
959
+ init_variables = self.module.init(
960
+ jax.random.PRNGKey(0),
961
+ decoder_input_ids=decoder_input_ids,
962
+ decoder_attention_mask=decoder_attention_mask,
963
+ decoder_position_ids=decoder_position_ids,
964
+ encoder_hidden_states=encoder_outputs[0],
965
+ init_cache=True,
966
+ method=_decoder_forward, # we only need to call the decoder to init the cache
967
+ )
968
+ return unfreeze(init_variables["cache"])
969
+
970
+ @add_start_docstrings(BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING)
971
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotSmallConfig)
972
+ def encode(
973
+ self,
974
+ input_ids: jnp.ndarray,
975
+ attention_mask: Optional[jnp.ndarray] = None,
976
+ position_ids: Optional[jnp.ndarray] = None,
977
+ output_attentions: Optional[bool] = None,
978
+ output_hidden_states: Optional[bool] = None,
979
+ return_dict: Optional[bool] = None,
980
+ train: bool = False,
981
+ params: dict = None,
982
+ dropout_rng: PRNGKey = None,
983
+ ):
984
+ r"""
985
+ Returns:
986
+
987
+ Example:
988
+
989
+ ```python
990
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
991
+
992
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
993
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
994
+
995
+ >>> text = "My friends are cool but they eat too many carbs."
996
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
997
+ >>> encoder_outputs = model.encode(**inputs)
998
+ ```"""
999
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1000
+ output_hidden_states = (
1001
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1002
+ )
1003
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1004
+
1005
+ if attention_mask is None:
1006
+ attention_mask = jnp.ones_like(input_ids)
1007
+ if position_ids is None:
1008
+ batch_size, sequence_length = input_ids.shape
1009
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1010
+
1011
+ # Handle any PRNG if needed
1012
+ rngs = {}
1013
+ if dropout_rng is not None:
1014
+ rngs["dropout"] = dropout_rng
1015
+
1016
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
1017
+ encode_module = module._get_encoder_module()
1018
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
1019
+
1020
+ return self.module.apply(
1021
+ {"params": params or self.params},
1022
+ input_ids=jnp.array(input_ids, dtype="i4"),
1023
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1024
+ position_ids=jnp.array(position_ids, dtype="i4"),
1025
+ output_attentions=output_attentions,
1026
+ output_hidden_states=output_hidden_states,
1027
+ return_dict=return_dict,
1028
+ deterministic=not train,
1029
+ rngs=rngs,
1030
+ method=_encoder_forward,
1031
+ )
1032
+
1033
+ @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING)
1034
+ @replace_return_docstrings(
1035
+ output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotSmallConfig
1036
+ )
1037
+ def decode(
1038
+ self,
1039
+ decoder_input_ids,
1040
+ encoder_outputs,
1041
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1042
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1043
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1044
+ past_key_values: dict = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ train: bool = False,
1049
+ params: dict = None,
1050
+ dropout_rng: PRNGKey = None,
1051
+ ):
1052
+ r"""
1053
+ Returns:
1054
+
1055
+ Example:
1056
+
1057
+ ```python
1058
+ >>> import jax.numpy as jnp
1059
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1060
+
1061
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1062
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1063
+
1064
+ >>> text = "My friends are cool but they eat too many carbs."
1065
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1066
+ >>> encoder_outputs = model.encode(**inputs)
1067
+
1068
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1069
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1070
+
1071
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1072
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
1073
+ ```"""
1074
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1075
+ output_hidden_states = (
1076
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1077
+ )
1078
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1079
+
1080
+ encoder_hidden_states = encoder_outputs[0]
1081
+ if encoder_attention_mask is None:
1082
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1083
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1084
+
1085
+ batch_size, sequence_length = decoder_input_ids.shape
1086
+ if decoder_attention_mask is None:
1087
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1088
+
1089
+ if decoder_position_ids is None:
1090
+ if past_key_values is not None:
1091
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1092
+
1093
+ decoder_position_ids = jnp.broadcast_to(
1094
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1095
+ )
1096
+
1097
+ # Handle any PRNG if needed
1098
+ rngs = {}
1099
+ if dropout_rng is not None:
1100
+ rngs["dropout"] = dropout_rng
1101
+
1102
+ inputs = {"params": params or self.params}
1103
+
1104
+ # if past_key_values are passed, the cache is already initialized, so a private flag init_cache has to be
1105
+ # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
1106
+ # it can be changed by FlaxBlenderbotSmallAttention module
1107
+ if past_key_values:
1108
+ inputs["cache"] = past_key_values
1109
+ mutable = ["cache"]
1110
+ else:
1111
+ mutable = False
1112
+
1113
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1114
+ decoder_module = module._get_decoder_module()
1115
+ return decoder_module(
1116
+ decoder_input_ids,
1117
+ decoder_attention_mask,
1118
+ decoder_position_ids,
1119
+ **kwargs,
1120
+ )
1121
+
1122
+ outputs = self.module.apply(
1123
+ inputs,
1124
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1125
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1126
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1127
+ encoder_hidden_states=encoder_hidden_states,
1128
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1129
+ output_attentions=output_attentions,
1130
+ output_hidden_states=output_hidden_states,
1131
+ return_dict=return_dict,
1132
+ deterministic=not train,
1133
+ rngs=rngs,
1134
+ mutable=mutable,
1135
+ method=_decoder_forward,
1136
+ )
1137
+
1138
+ # add updated cache to model output
1139
+ if past_key_values is not None and return_dict:
1140
+ outputs, past = outputs
1141
+ outputs["past_key_values"] = unfreeze(past["cache"])
1142
+ return outputs
1143
+ elif past_key_values is not None and not return_dict:
1144
+ outputs, past = outputs
1145
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1146
+
1147
+ return outputs
1148
+
1149
+ def __call__(
1150
+ self,
1151
+ input_ids: jnp.ndarray,
1152
+ attention_mask: Optional[jnp.ndarray] = None,
1153
+ decoder_input_ids: Optional[jnp.ndarray] = None,
1154
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1155
+ position_ids: Optional[jnp.ndarray] = None,
1156
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1157
+ output_attentions: Optional[bool] = None,
1158
+ output_hidden_states: Optional[bool] = None,
1159
+ return_dict: Optional[bool] = None,
1160
+ train: bool = False,
1161
+ params: dict = None,
1162
+ dropout_rng: PRNGKey = None,
1163
+ ):
1164
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1165
+ output_hidden_states = (
1166
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1167
+ )
1168
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1169
+
1170
+ # prepare encoder inputs
1171
+ if attention_mask is None:
1172
+ attention_mask = jnp.ones_like(input_ids)
1173
+ if position_ids is None:
1174
+ batch_size, sequence_length = input_ids.shape
1175
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1176
+
1177
+ # prepare decoder inputs
1178
+ if decoder_input_ids is None:
1179
+ decoder_input_ids = shift_tokens_right(
1180
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
1181
+ )
1182
+ if decoder_attention_mask is None:
1183
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1184
+ if decoder_position_ids is None:
1185
+ batch_size, sequence_length = decoder_input_ids.shape
1186
+ decoder_position_ids = jnp.broadcast_to(
1187
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1188
+ )
1189
+
1190
+ # Handle any PRNG if needed
1191
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1192
+
1193
+ return self.module.apply(
1194
+ {"params": params or self.params},
1195
+ input_ids=jnp.array(input_ids, dtype="i4"),
1196
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1197
+ position_ids=jnp.array(position_ids, dtype="i4"),
1198
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1199
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1200
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ deterministic=not train,
1205
+ rngs=rngs,
1206
+ )
1207
+
1208
+
1209
+ @add_start_docstrings(
1210
+ "The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top.",
1211
+ BLENDERBOT_SMALL_START_DOCSTRING,
1212
+ )
1213
+ class FlaxBlenderbotSmallModel(FlaxBlenderbotSmallPreTrainedModel):
1214
+ config: BlenderbotSmallConfig
1215
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
1216
+ module_class = FlaxBlenderbotSmallModule
1217
+
1218
+
1219
+ append_call_sample_docstring(FlaxBlenderbotSmallModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
1220
+
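+ # Minimal usage sketch (added for clarity, not part of the upstream module; mirrors the sample
+ # docstring appended above, execution not verified here):
+ #
+ #   from transformers import AutoTokenizer, FlaxBlenderbotSmallModel
+ #
+ #   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
+ #   model = FlaxBlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
+ #   inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="np")
+ #   outputs = model(**inputs)
+ #   last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, d_model)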
1221
+
1222
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->BlenderbotSmall
1223
+ class FlaxBlenderbotSmallForConditionalGenerationModule(nn.Module):
1224
+ config: BlenderbotSmallConfig
1225
+ dtype: jnp.dtype = jnp.float32
1226
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
1227
+
1228
+ def setup(self):
1229
+ self.model = FlaxBlenderbotSmallModule(config=self.config, dtype=self.dtype)
1230
+ self.lm_head = nn.Dense(
1231
+ self.model.shared.num_embeddings,
1232
+ use_bias=False,
1233
+ dtype=self.dtype,
1234
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1235
+ )
1236
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
1237
+
1238
+ def _get_encoder_module(self):
1239
+ return self.model.encoder
1240
+
1241
+ def _get_decoder_module(self):
1242
+ return self.model.decoder
1243
+
1244
+ def __call__(
1245
+ self,
1246
+ input_ids,
1247
+ attention_mask,
1248
+ decoder_input_ids,
1249
+ decoder_attention_mask,
1250
+ position_ids,
1251
+ decoder_position_ids,
1252
+ output_attentions: bool = False,
1253
+ output_hidden_states: bool = False,
1254
+ return_dict: bool = True,
1255
+ deterministic: bool = True,
1256
+ ):
1257
+ outputs = self.model(
1258
+ input_ids=input_ids,
1259
+ attention_mask=attention_mask,
1260
+ decoder_input_ids=decoder_input_ids,
1261
+ decoder_attention_mask=decoder_attention_mask,
1262
+ position_ids=position_ids,
1263
+ decoder_position_ids=decoder_position_ids,
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ deterministic=deterministic,
1268
+ )
1269
+
1270
+ hidden_states = outputs[0]
1271
+
1272
+ if self.config.tie_word_embeddings:
1273
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
1274
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1275
+ else:
1276
+ lm_logits = self.lm_head(hidden_states)
1277
+
1278
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
1279
+
1280
+ if not return_dict:
1281
+ output = (lm_logits,) + outputs[1:]
1282
+ return output
1283
+
1284
+ return FlaxSeq2SeqLMOutput(
1285
+ logits=lm_logits,
1286
+ decoder_hidden_states=outputs.decoder_hidden_states,
1287
+ decoder_attentions=outputs.decoder_attentions,
1288
+ cross_attentions=outputs.cross_attentions,
1289
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1290
+ encoder_hidden_states=outputs.encoder_hidden_states,
1291
+ encoder_attentions=outputs.encoder_attentions,
1292
+ )
1293
+
1294
+
1295
+ @add_start_docstrings(
1296
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
1297
+ BLENDERBOT_SMALL_START_DOCSTRING,
1298
+ )
1299
+ class FlaxBlenderbotSmallForConditionalGeneration(FlaxBlenderbotSmallPreTrainedModel):
1300
+ module_class = FlaxBlenderbotSmallForConditionalGenerationModule
1301
+ dtype: jnp.dtype = jnp.float32
1302
+
1303
+ @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING)
1304
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotSmallConfig)
1305
+ def decode(
1306
+ self,
1307
+ decoder_input_ids,
1308
+ encoder_outputs,
1309
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1310
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1311
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1312
+ past_key_values: dict = None,
1313
+ output_attentions: Optional[bool] = None,
1314
+ output_hidden_states: Optional[bool] = None,
1315
+ return_dict: Optional[bool] = None,
1316
+ deterministic: bool = True,
1317
+ params: dict = None,
1318
+ dropout_rng: PRNGKey = None,
1319
+ ):
1320
+ r"""
1321
+ Returns:
1322
+
1323
+ Example:
1324
+
1325
+ ```python
1326
+ >>> import jax.numpy as jnp
1327
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1328
+
1329
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1330
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1331
+
1332
+ >>> text = "My friends are cool but they eat too many carbs."
1333
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1334
+ >>> encoder_outputs = model.encode(**inputs)
1335
+
1336
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1337
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1338
+
1339
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1340
+ >>> logits = outputs.logits
1341
+ ```"""
1342
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1343
+ output_hidden_states = (
1344
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1345
+ )
1346
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1347
+
1348
+ encoder_hidden_states = encoder_outputs[0]
1349
+ if encoder_attention_mask is None:
1350
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1351
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1352
+
1353
+ batch_size, sequence_length = decoder_input_ids.shape
1354
+ if decoder_attention_mask is None:
1355
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1356
+
1357
+ if decoder_position_ids is None:
1358
+ if past_key_values is not None:
1359
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1360
+
1361
+ decoder_position_ids = jnp.broadcast_to(
1362
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1363
+ )
1364
+
1365
+ # Handle any PRNG if needed
1366
+ rngs = {}
1367
+ if dropout_rng is not None:
1368
+ rngs["dropout"] = dropout_rng
1369
+
1370
+ inputs = {"params": params or self.params}
1371
+
1372
+ # if past_key_values are passed, the cache is already initialized, so a private flag init_cache has to be
1373
+ # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
1374
+ # it can be changed by FlaxBlenderbotSmallAttention module
1375
+ if past_key_values:
1376
+ inputs["cache"] = past_key_values
1377
+ mutable = ["cache"]
1378
+ else:
1379
+ mutable = False
1380
+
1381
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1382
+ decoder_module = module._get_decoder_module()
1383
+ outputs = decoder_module(
1384
+ decoder_input_ids,
1385
+ decoder_attention_mask,
1386
+ decoder_position_ids,
1387
+ **kwargs,
1388
+ )
1389
+ hidden_states = outputs[0]
1390
+
1391
+ if self.config.tie_word_embeddings:
1392
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
1393
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1394
+ else:
1395
+ lm_logits = module.lm_head(hidden_states)
1396
+
1397
+ lm_logits += module.final_logits_bias.astype(self.dtype)
1398
+ return lm_logits, outputs
1399
+
1400
+ outputs = self.module.apply(
1401
+ inputs,
1402
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1403
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1404
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1405
+ encoder_hidden_states=encoder_hidden_states,
1406
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ deterministic=deterministic,
1411
+ rngs=rngs,
1412
+ mutable=mutable,
1413
+ method=_decoder_forward,
1414
+ )
1415
+
1416
+ if past_key_values is None:
1417
+ lm_logits, decoder_outputs = outputs
1418
+ else:
1419
+ (lm_logits, decoder_outputs), past = outputs
1420
+
1421
+ if return_dict:
1422
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
1423
+ logits=lm_logits,
1424
+ hidden_states=decoder_outputs.hidden_states,
1425
+ attentions=decoder_outputs.attentions,
1426
+ cross_attentions=decoder_outputs.cross_attentions,
1427
+ )
1428
+ else:
1429
+ outputs = (lm_logits,) + decoder_outputs[1:]
1430
+
1431
+ # add updated cache to model output
1432
+ if past_key_values is not None and return_dict:
1433
+ outputs["past_key_values"] = unfreeze(past["cache"])
1434
+ return outputs
1435
+ elif past_key_values is not None and not return_dict:
1436
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1437
+
1438
+ return outputs
1439
+
1440
+ def prepare_inputs_for_generation(
1441
+ self,
1442
+ decoder_input_ids,
1443
+ max_length,
1444
+ attention_mask: Optional[jax.Array] = None,
1445
+ decoder_attention_mask: Optional[jax.Array] = None,
1446
+ encoder_outputs=None,
1447
+ **kwargs,
1448
+ ):
1449
+ # initializing the cache
1450
+ batch_size, seq_length = decoder_input_ids.shape
1451
+
1452
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1453
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1454
+ # But since the decoder uses a causal mask, those positions are masked anyways.
1455
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
1456
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1457
+ if decoder_attention_mask is not None:
1458
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
1459
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
1460
+ else:
1461
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1462
+
1463
+ return {
1464
+ "past_key_values": past_key_values,
1465
+ "encoder_outputs": encoder_outputs,
1466
+ "encoder_attention_mask": attention_mask,
1467
+ "decoder_attention_mask": extended_attention_mask,
1468
+ "decoder_position_ids": position_ids,
1469
+ }
1470
+
1471
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1472
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1473
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
1474
+ return model_kwargs
1475
+
1476
+
1477
+ FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING = """
1478
+ Returns:
1479
+
1480
+ Summarization example:
1481
+
1482
+ ```py
1483
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1484
+
1485
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1486
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1487
+
1488
+ >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
1489
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
1490
+
1491
+ >>> # Generate Summary
1492
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
1493
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
1494
+ ```
1495
+
1496
+ Mask filling example:
1497
+
1498
+ ```py
1499
+ >>> import jax
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1500
+
1501
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1502
+ >>> TXT = "My friends are <mask> but they eat too many carbs."
1503
+
1504
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1505
+ >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"]
1506
+ >>> logits = model(input_ids).logits
1507
+
1508
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
1509
+ >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
1510
+ >>> values, predictions = jax.lax.top_k(probs, k=1)
1511
+
1512
+ >>> tokenizer.decode(predictions).split()
1513
+ ```
1514
+ """
1515
+
1516
+ overwrite_call_docstring(
1517
+ FlaxBlenderbotSmallForConditionalGeneration,
1518
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING + FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING,
1519
+ )
1520
+ append_replace_return_docstrings(
1521
+ FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
1522
+ )
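A minimal, self-contained sketch (editorial, not part of the diff) of the trick used in `prepare_inputs_for_generation` above: a static attention mask of width `max_length` is pre-filled with ones, the real decoder mask is copied into its leading slice, and position ids are taken from a cumulative sum so padded positions do not advance. The batch size, mask values, and `max_length` below are hypothetical.

```py
>>> import jax.numpy as jnp
>>> from jax import lax

>>> max_length = 6
>>> decoder_attention_mask = jnp.array([[1, 1, 1, 0]], dtype="i4")  # hypothetical 1-sample batch

>>> # static mask: compilation-friendly; the causal mask hides the trailing ones anyway
>>> extended_attention_mask = jnp.ones((1, max_length), dtype="i4")
>>> extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
>>> position_ids = decoder_attention_mask.cumsum(axis=-1) - 1

>>> extended_attention_mask.tolist()
[[1, 1, 1, 0, 1, 1]]
>>> position_ids.tolist()
[[0, 1, 2, 2]]
```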
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py ADDED
@@ -0,0 +1,1526 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 BlenderbotSmall model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFSeq2SeqLMOutput,
31
+ TFSeq2SeqModelOutput,
32
+ )
33
+
34
+ # Public API
35
+ from ...modeling_tf_utils import (
36
+ TFCausalLanguageModelingLoss,
37
+ TFPreTrainedModel,
38
+ keras,
39
+ keras_serializable,
40
+ unpack_inputs,
41
+ )
42
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_end_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
57
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
58
+
59
+
60
+ LARGE_NEGATIVE = -1e8
61
+
62
+
63
+ # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
64
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
65
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
66
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
67
+ start_tokens = tf.fill(
68
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
69
+ )
70
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
71
+ # replace possible -100 values in labels by `pad_token_id`
72
+ shifted_input_ids = tf.where(
73
+ shifted_input_ids == -100,
74
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
75
+ shifted_input_ids,
76
+ )
77
+
78
+ # "Verify that `labels` has only positive values and -100"
79
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
80
+
81
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
82
+ with tf.control_dependencies([assert_gte0]):
83
+ shifted_input_ids = tf.identity(shifted_input_ids)
84
+
85
+ return shifted_input_ids
86
+
87
+
88
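# Editorial sketch, not part of the original file: `shift_tokens_right` above prepends
# `decoder_start_token_id`, drops the last token, and maps the -100 labels-padding value
# back to `pad_token_id`. With hypothetical ids (pad = 0, decoder start = 1):
#
#     import tensorflow as tf
#     labels = tf.constant([[5, 6, 7, -100]])
#     shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
#     # -> [[1, 5, 6, 7]]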
+ # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
89
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
90
+ """
91
+ Make causal mask used for uni-directional (decoder) self-attention.
92
+ """
93
+ bsz = input_ids_shape[0]
94
+ tgt_len = input_ids_shape[1]
95
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
96
+ mask_cond = tf.range(shape_list(mask)[-1])
97
+
98
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
99
+
100
+ if past_key_values_length > 0:
101
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
102
+
103
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
104
+
105
+
106
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
107
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
108
+ """
109
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
110
+ """
111
+ src_len = shape_list(mask)[1]
112
+ tgt_len = tgt_len if tgt_len is not None else src_len
113
+ one_cst = tf.constant(1.0)
114
+ mask = tf.cast(mask, dtype=one_cst.dtype)
115
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
116
+
117
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
118
+
119
+
120
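# Editorial sketch, not part of the original file: both helpers above return *additive*
# masks -- 0.0 where attention is allowed and LARGE_NEGATIVE (-1e8) where it is blocked --
# so the padding and causal masks can simply be summed onto the attention logits.
# With a hypothetical padding mask for one sample of three tokens, the last one padded:
#
#     import tensorflow as tf
#     pad_mask = tf.constant([[1.0, 1.0, 0.0]])
#     _expand_mask(pad_mask)       # shape [1, 1, 3, 3]; every row is [0., 0., -1e8]
#     _make_causal_mask((1, 3))    # upper triangle is -1e8, diagonal and lower triangle are 0.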
+ # Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
121
+ class TFBlenderbotSmallLearnedPositionalEmbedding(keras.layers.Embedding):
122
+ """
123
+ This module learns positional embeddings up to a fixed maximum size.
124
+ """
125
+
126
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
127
+ super().__init__(num_embeddings, embedding_dim, **kwargs)
128
+
129
+ def call(
130
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
131
+ ):
132
+ """Input is expected to be of size [bsz x seqlen]."""
133
+ if position_ids is None:
134
+ seq_len = input_shape[1]
135
+ position_ids = tf.range(seq_len, delta=1, name="range")
136
+ position_ids += past_key_values_length
137
+
138
+ return super().call(tf.cast(position_ids, dtype=tf.int32))
139
+
140
+
141
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall
142
+ class TFBlenderbotSmallAttention(keras.layers.Layer):
143
+ """Multi-headed attention from "Attention Is All You Need"""
144
+
145
+ def __init__(
146
+ self,
147
+ embed_dim: int,
148
+ num_heads: int,
149
+ dropout: float = 0.0,
150
+ is_decoder: bool = False,
151
+ bias: bool = True,
152
+ **kwargs,
153
+ ):
154
+ super().__init__(**kwargs)
155
+ self.embed_dim = embed_dim
156
+
157
+ self.num_heads = num_heads
158
+ self.dropout = keras.layers.Dropout(dropout)
159
+ self.head_dim = embed_dim // num_heads
160
+ if (self.head_dim * num_heads) != self.embed_dim:
161
+ raise ValueError(
162
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
163
+ f" and `num_heads`: {num_heads})."
164
+ )
165
+ self.scaling = self.head_dim**-0.5
166
+ self.is_decoder = is_decoder
167
+
168
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
169
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
170
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
171
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
172
+
173
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
174
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
175
+
176
+ def call(
177
+ self,
178
+ hidden_states: tf.Tensor,
179
+ key_value_states: tf.Tensor | None = None,
180
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
181
+ attention_mask: tf.Tensor | None = None,
182
+ layer_head_mask: tf.Tensor | None = None,
183
+ training: Optional[bool] = False,
184
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
185
+ """Input shape: Batch x Time x Channel"""
186
+
187
+ # if key_value_states are provided this layer is used as a cross-attention layer
188
+ # for the decoder
189
+ is_cross_attention = key_value_states is not None
190
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
191
+
192
+ # get query proj
193
+ query_states = self.q_proj(hidden_states) * self.scaling
194
+ # get key, value proj
195
+ if is_cross_attention and past_key_value is not None:
196
+ # reuse k,v, cross_attentions
197
+ key_states = past_key_value[0]
198
+ value_states = past_key_value[1]
199
+ elif is_cross_attention:
200
+ # cross_attentions
201
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
202
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
203
+ elif past_key_value is not None:
204
+ # reuse k, v, self_attention
205
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
206
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
207
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
208
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
209
+ else:
210
+ # self_attention
211
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
212
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
213
+
214
+ if self.is_decoder:
215
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
216
+ # Further calls to cross_attention layer can then reuse all cross-attention
217
+ # key/value_states (first "if" case)
218
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
219
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
220
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
221
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
222
+ past_key_value = (key_states, value_states)
223
+
224
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
225
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
226
+ key_states = tf.reshape(key_states, proj_shape)
227
+ value_states = tf.reshape(value_states, proj_shape)
228
+
229
+ src_len = shape_list(key_states)[1]
230
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
231
+
232
+ tf.debugging.assert_equal(
233
+ shape_list(attn_weights),
234
+ [bsz * self.num_heads, tgt_len, src_len],
235
+ message=(
236
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
237
+ f" {shape_list(attn_weights)}"
238
+ ),
239
+ )
240
+
241
+ if attention_mask is not None:
242
+ tf.debugging.assert_equal(
243
+ shape_list(attention_mask),
244
+ [bsz, 1, tgt_len, src_len],
245
+ message=(
246
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
247
+ f" {shape_list(attention_mask)}"
248
+ ),
249
+ )
250
+
251
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
252
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
253
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
254
+
255
+ attn_weights = stable_softmax(attn_weights, axis=-1)
256
+
257
+ if layer_head_mask is not None:
258
+ tf.debugging.assert_equal(
259
+ shape_list(layer_head_mask),
260
+ [self.num_heads],
261
+ message=(
262
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
263
+ f" {shape_list(layer_head_mask)}"
264
+ ),
265
+ )
266
+
267
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
268
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
269
+ )
270
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
271
+
272
+ attn_probs = self.dropout(attn_weights, training=training)
273
+ attn_output = tf.matmul(attn_probs, value_states)
274
+
275
+ tf.debugging.assert_equal(
276
+ shape_list(attn_output),
277
+ [bsz * self.num_heads, tgt_len, self.head_dim],
278
+ message=(
279
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
280
+ f" {shape_list(attn_output)}"
281
+ ),
282
+ )
283
+
284
+ attn_output = tf.transpose(
285
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
286
+ )
287
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
288
+
289
+ attn_output = self.out_proj(attn_output)
290
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
291
+
292
+ return attn_output, attn_weights, past_key_value
293
+
294
+ def build(self, input_shape=None):
295
+ if self.built:
296
+ return
297
+ self.built = True
298
+ if getattr(self, "k_proj", None) is not None:
299
+ with tf.name_scope(self.k_proj.name):
300
+ self.k_proj.build([None, None, self.embed_dim])
301
+ if getattr(self, "q_proj", None) is not None:
302
+ with tf.name_scope(self.q_proj.name):
303
+ self.q_proj.build([None, None, self.embed_dim])
304
+ if getattr(self, "v_proj", None) is not None:
305
+ with tf.name_scope(self.v_proj.name):
306
+ self.v_proj.build([None, None, self.embed_dim])
307
+ if getattr(self, "out_proj", None) is not None:
308
+ with tf.name_scope(self.out_proj.name):
309
+ self.out_proj.build([None, None, self.embed_dim])
310
+
311
+
312
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall
313
+ class TFBlenderbotSmallEncoderLayer(keras.layers.Layer):
314
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
315
+ super().__init__(**kwargs)
316
+ self.embed_dim = config.d_model
317
+ self.self_attn = TFBlenderbotSmallAttention(
318
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
319
+ )
320
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
321
+ self.dropout = keras.layers.Dropout(config.dropout)
322
+ self.activation_fn = get_tf_activation(config.activation_function)
323
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
324
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
325
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
326
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
327
+ self.config = config
328
+
329
+ def call(
330
+ self,
331
+ hidden_states: tf.Tensor,
332
+ attention_mask: np.ndarray | tf.Tensor | None,
333
+ layer_head_mask: tf.Tensor | None,
334
+ training: Optional[bool] = False,
335
+ ) -> tf.Tensor:
336
+ """
337
+ Args:
338
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
339
+ attention_mask (`tf.Tensor`): attention mask of size
340
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
341
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
342
+ `(encoder_attention_heads,)`
343
+ """
344
+ residual = hidden_states
345
+ hidden_states, self_attn_weights, _ = self.self_attn(
346
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
347
+ )
348
+
349
+ tf.debugging.assert_equal(
350
+ shape_list(hidden_states),
351
+ shape_list(residual),
352
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
353
+ )
354
+
355
+ hidden_states = self.dropout(hidden_states, training=training)
356
+ hidden_states = residual + hidden_states
357
+ hidden_states = self.self_attn_layer_norm(hidden_states)
358
+
359
+ residual = hidden_states
360
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
361
+ hidden_states = self.activation_dropout(hidden_states, training=training)
362
+ hidden_states = self.fc2(hidden_states)
363
+ hidden_states = self.dropout(hidden_states, training=training)
364
+ hidden_states = residual + hidden_states
365
+ hidden_states = self.final_layer_norm(hidden_states)
366
+
367
+ return hidden_states, self_attn_weights
368
+
369
+ def build(self, input_shape=None):
370
+ if self.built:
371
+ return
372
+ self.built = True
373
+ if getattr(self, "self_attn", None) is not None:
374
+ with tf.name_scope(self.self_attn.name):
375
+ self.self_attn.build(None)
376
+ if getattr(self, "self_attn_layer_norm", None) is not None:
377
+ with tf.name_scope(self.self_attn_layer_norm.name):
378
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
379
+ if getattr(self, "fc1", None) is not None:
380
+ with tf.name_scope(self.fc1.name):
381
+ self.fc1.build([None, None, self.embed_dim])
382
+ if getattr(self, "fc2", None) is not None:
383
+ with tf.name_scope(self.fc2.name):
384
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
385
+ if getattr(self, "final_layer_norm", None) is not None:
386
+ with tf.name_scope(self.final_layer_norm.name):
387
+ self.final_layer_norm.build([None, None, self.embed_dim])
388
+
389
+
390
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall
391
+ class TFBlenderbotSmallDecoderLayer(keras.layers.Layer):
392
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
393
+ super().__init__(**kwargs)
394
+ self.embed_dim = config.d_model
395
+ self.self_attn = TFBlenderbotSmallAttention(
396
+ embed_dim=self.embed_dim,
397
+ num_heads=config.decoder_attention_heads,
398
+ dropout=config.attention_dropout,
399
+ name="self_attn",
400
+ is_decoder=True,
401
+ )
402
+ self.dropout = keras.layers.Dropout(config.dropout)
403
+ self.activation_fn = get_tf_activation(config.activation_function)
404
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
405
+
406
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
407
+ self.encoder_attn = TFBlenderbotSmallAttention(
408
+ self.embed_dim,
409
+ config.decoder_attention_heads,
410
+ dropout=config.attention_dropout,
411
+ name="encoder_attn",
412
+ is_decoder=True,
413
+ )
414
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
415
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
416
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
417
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
418
+ self.config = config
419
+
420
+ def call(
421
+ self,
422
+ hidden_states: tf.Tensor,
423
+ attention_mask: np.ndarray | tf.Tensor | None = None,
424
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
425
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
426
+ layer_head_mask: tf.Tensor | None = None,
427
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
428
+ past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
429
+ training: Optional[bool] = False,
430
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
431
+ """
432
+ Args:
433
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
434
+ attention_mask (`tf.Tensor`): attention mask of size
435
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
436
+ encoder_hidden_states (`tf.Tensor`):
437
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
438
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
439
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
440
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
441
+ `(decoder_attention_heads,)`
442
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
443
+ `(decoder_attention_heads,)`
444
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
445
+ """
446
+ residual = hidden_states
447
+
448
+ # Self Attention
449
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
450
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
451
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
452
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
453
+ hidden_states=hidden_states,
454
+ past_key_value=self_attn_past_key_value,
455
+ attention_mask=attention_mask,
456
+ layer_head_mask=layer_head_mask,
457
+ )
458
+ hidden_states = self.dropout(hidden_states, training=training)
459
+ hidden_states = residual + hidden_states
460
+ hidden_states = self.self_attn_layer_norm(hidden_states)
461
+
462
+ # Cross-Attention Block
463
+ cross_attn_present_key_value = None
464
+ cross_attn_weights = None
465
+ if encoder_hidden_states is not None:
466
+ residual = hidden_states
467
+
468
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
469
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
470
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
471
+ hidden_states=hidden_states,
472
+ key_value_states=encoder_hidden_states,
473
+ attention_mask=encoder_attention_mask,
474
+ layer_head_mask=cross_attn_layer_head_mask,
475
+ past_key_value=cross_attn_past_key_value,
476
+ )
477
+ hidden_states = self.dropout(hidden_states, training=training)
478
+ hidden_states = residual + hidden_states
479
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
480
+
481
+ # add cross-attn to positions 3,4 of present_key_value tuple
482
+ present_key_value = present_key_value + cross_attn_present_key_value
483
+
484
+ # Fully Connected
485
+ residual = hidden_states
486
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
487
+ hidden_states = self.activation_dropout(hidden_states, training=training)
488
+ hidden_states = self.fc2(hidden_states)
489
+ hidden_states = self.dropout(hidden_states, training=training)
490
+ hidden_states = residual + hidden_states
491
+ hidden_states = self.final_layer_norm(hidden_states)
492
+
493
+ return (
494
+ hidden_states,
495
+ self_attn_weights,
496
+ cross_attn_weights,
497
+ present_key_value,
498
+ )
499
+
500
+ def build(self, input_shape=None):
501
+ if self.built:
502
+ return
503
+ self.built = True
504
+ if getattr(self, "self_attn", None) is not None:
505
+ with tf.name_scope(self.self_attn.name):
506
+ self.self_attn.build(None)
507
+ if getattr(self, "self_attn_layer_norm", None) is not None:
508
+ with tf.name_scope(self.self_attn_layer_norm.name):
509
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
510
+ if getattr(self, "encoder_attn", None) is not None:
511
+ with tf.name_scope(self.encoder_attn.name):
512
+ self.encoder_attn.build(None)
513
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
514
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
515
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
516
+ if getattr(self, "fc1", None) is not None:
517
+ with tf.name_scope(self.fc1.name):
518
+ self.fc1.build([None, None, self.embed_dim])
519
+ if getattr(self, "fc2", None) is not None:
520
+ with tf.name_scope(self.fc2.name):
521
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
522
+ if getattr(self, "final_layer_norm", None) is not None:
523
+ with tf.name_scope(self.final_layer_norm.name):
524
+ self.final_layer_norm.build([None, None, self.embed_dim])
525
+
526
+
527
+ class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel):
528
+ config_class = BlenderbotSmallConfig
529
+ base_model_prefix = "model"
530
+
531
+
532
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
533
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
534
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
535
+ etc.)
536
+
537
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
538
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
539
+ behavior.
540
+
541
+ <Tip>
542
+
543
+ TensorFlow models and layers in `transformers` accept two formats as input:
544
+
545
+ - having all inputs as keyword arguments (like PyTorch models), or
546
+ - having all inputs as a list, tuple or dict in the first positional argument.
547
+
548
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
549
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
550
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
551
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
552
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
553
+ positional argument:
554
+
555
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
556
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
557
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
558
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
559
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
560
+
561
+ Note that when creating models and layers with
562
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
563
+ about any of this, as you can just pass inputs like you would to any other Python function!
564
+
565
+ </Tip>
566
+
567
+ Args:
568
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
569
+ Initializing with a config file does not load the weights associated with the model, only the
570
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
571
+ """
572
+
573
+ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
574
+ Conversation example::
575
+
576
+ ```py
577
+ >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration
578
+
579
+ >>> mname = "facebook/blenderbot_small-90M"
580
+ >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname)
581
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
582
+
583
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
584
+ >>> print("Human: ", UTTERANCE)
585
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
586
+
587
+ >>> reply_ids = model.generate(**inputs)
588
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
589
+ what kind of carbs do they eat? i don't know much about carbs.
590
+
591
+ >>> REPLY = "I'm not sure"
592
+ >>> print("Human: ", REPLY)
593
+ >>> NEXT_UTTERANCE = (
594
+ ... "My friends are cool but they eat too many carbs.</s> "
595
+ ... "<s>what kind of carbs do they eat? i don't know much about carbs.</s> "
596
+ ... "<s>I'm not sure."
597
+ ... )
598
+
599
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
600
+ >>> inputs.pop("token_type_ids")
601
+ >>> next_reply_ids = model.generate(**inputs)
602
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
603
+ ```
604
+ """
605
+
606
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
607
+ Args:
608
+ input_ids (`tf.Tensor` of shape `({0})`):
609
+ Indices of input sequence tokens in the vocabulary.
610
+
611
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
612
+ [`PreTrainedTokenizer.__call__`] for details.
613
+
614
+ [What are input IDs?](../glossary#input-ids)
615
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
616
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
617
+
618
+ - 1 for tokens that are **not masked**,
619
+ - 0 for tokens that are **masked**.
620
+
621
+ [What are attention masks?](../glossary#attention-mask)
622
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
623
+ Indices of decoder input sequence tokens in the vocabulary.
624
+
625
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
626
+ [`PreTrainedTokenizer.__call__`] for details.
627
+
628
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
629
+
630
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
631
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
632
+ `past_key_values`).
633
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
634
+ If not provided, a mask that ignores pad tokens will be created by default. Setting this manually is not recommended for most use cases.
635
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
636
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
637
+ range `[0, config.max_position_embeddings - 1]`.
638
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
639
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
640
+
641
+ - 1 indicates the head is **not masked**,
642
+ - 0 indicates the head is **masked**.
643
+
644
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
645
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
646
+
647
+ - 1 indicates the head is **not masked**,
648
+ - 0 indicates the head is **masked**.
649
+
650
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
651
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
652
+
653
+ - 1 indicates the head is **not masked**,
654
+ - 0 indicates the head is **masked**.
655
+
656
+ encoder_outputs (`tf.FloatTensor`, *optional*):
657
+ Sequence of hidden states at the output of the last layer of the encoder, of shape
658
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
659
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
660
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
661
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
662
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
663
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
664
+ use_cache (`bool`, *optional*, defaults to `True`):
665
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
666
+ `past_key_values`). Set to `False` during training, `True` during generation
667
+ output_attentions (`bool`, *optional*):
668
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
669
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
670
+ config will be used instead.
671
+ output_hidden_states (`bool`, *optional*):
672
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
673
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
674
+ used instead.
675
+ return_dict (`bool`, *optional*):
676
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
677
+ eager mode, in graph mode the value will always be set to True.
678
+ training (`bool`, *optional*, defaults to `False`):
679
+ Whether or not to use the model in training mode (some modules like dropout modules have different
680
+ behaviors between training and evaluation).
681
+ """
682
+
683
+
684
+ @keras_serializable
685
+ class TFBlenderbotSmallEncoder(keras.layers.Layer):
686
+ config_class = BlenderbotSmallConfig
687
+ """
688
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
689
+ [`TFBlenderbotSmallEncoderLayer`].
690
+
691
+ Args:
692
+ config: BlenderbotSmallConfig
693
+ """
694
+
695
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
696
+ super().__init__(**kwargs)
697
+ self.config = config
698
+ self.dropout = keras.layers.Dropout(config.dropout)
699
+ self.layerdrop = config.encoder_layerdrop
700
+ self.padding_idx = config.pad_token_id
701
+ self.max_source_positions = config.max_position_embeddings
702
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
703
+
704
+ self.embed_tokens = embed_tokens
705
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
706
+ config.max_position_embeddings,
707
+ config.d_model,
708
+ name="embed_positions",
709
+ )
710
+ self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
711
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
712
+ self.embed_dim = config.d_model
713
+
714
+ def get_embed_tokens(self):
715
+ return self.embed_tokens
716
+
717
+ def set_embed_tokens(self, embed_tokens):
718
+ self.embed_tokens = embed_tokens
719
+
720
+ @unpack_inputs
721
+ def call(
722
+ self,
723
+ input_ids=None,
724
+ inputs_embeds=None,
725
+ attention_mask=None,
726
+ head_mask=None,
727
+ output_attentions=None,
728
+ output_hidden_states=None,
729
+ return_dict=None,
730
+ training=False,
731
+ ):
732
+ """
733
+ Args:
734
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
735
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
736
+ provide it.
737
+
738
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
739
+ [`PreTrainedTokenizer.__call__`] for details.
740
+
741
+ [What are input IDs?](../glossary#input-ids)
742
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
743
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
744
+
745
+ - 1 for tokens that are **not masked**,
746
+ - 0 for tokens that are **masked**.
747
+
748
+ [What are attention masks?](../glossary#attention-mask)
749
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
750
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
751
+
752
+ - 1 indicates the head is **not masked**,
753
+ - 0 indicates the head is **masked**.
754
+
755
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
756
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
757
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
758
+ than the model's internal embedding lookup matrix.
759
+ output_attentions (`bool`, *optional*):
760
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
761
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
762
+ in the config will be used instead.
763
+ output_hidden_states (`bool`, *optional*):
764
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
765
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
766
+ will be used instead.
767
+ return_dict (`bool`, *optional*):
768
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
769
+ in eager mode, in graph mode the value will always be set to True.
770
+ training (`bool`, *optional*, defaults to `False`):
771
+ Whether or not to use the model in training mode (some modules like dropout modules have different
772
+ behaviors between training and evaluation).
773
+ """
774
+ if input_ids is not None and inputs_embeds is not None:
775
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
776
+ elif input_ids is not None:
777
+ input_shape = shape_list(input_ids)
778
+ elif inputs_embeds is not None:
779
+ input_shape = shape_list(inputs_embeds)[:-1]
780
+ else:
781
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
782
+
783
+ if inputs_embeds is None:
784
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
785
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
786
+
787
+ embed_pos = self.embed_positions(input_shape)
788
+ hidden_states = inputs_embeds + embed_pos
789
+ hidden_states = self.layernorm_embedding(hidden_states)
790
+ hidden_states = self.dropout(hidden_states, training=training)
791
+
792
+ # check attention mask and invert
793
+ if attention_mask is not None:
794
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
795
+ attention_mask = _expand_mask(attention_mask)
796
+ else:
797
+ attention_mask = None
798
+
799
+ encoder_states = () if output_hidden_states else None
800
+ all_attentions = () if output_attentions else None
801
+
802
+ # check if head_mask has a correct number of layers specified if desired
803
+ if head_mask is not None:
804
+ tf.debugging.assert_equal(
805
+ shape_list(head_mask)[0],
806
+ len(self.layers),
807
+ message=(
808
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
809
+ f" {shape_list(head_mask)[0]}."
810
+ ),
811
+ )
812
+
813
+ # encoder layers
814
+ for idx, encoder_layer in enumerate(self.layers):
815
+ if output_hidden_states:
816
+ encoder_states = encoder_states + (hidden_states,)
817
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
818
+ dropout_probability = random.uniform(0, 1)
819
+ if training and (dropout_probability < self.layerdrop): # skip the layer
820
+ continue
821
+
822
+ hidden_states, attn = encoder_layer(
823
+ hidden_states,
824
+ attention_mask,
825
+ head_mask[idx] if head_mask is not None else None,
826
+ )
827
+
828
+ if output_attentions:
829
+ all_attentions += (attn,)
830
+
831
+ if output_hidden_states:
832
+ encoder_states = encoder_states + (hidden_states,)
833
+
834
+ if not return_dict:
835
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
836
+ return TFBaseModelOutput(
837
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
838
+ )
839
+
840
+ def build(self, input_shape=None):
841
+ if self.built:
842
+ return
843
+ self.built = True
844
+ if getattr(self, "embed_positions", None) is not None:
845
+ with tf.name_scope(self.embed_positions.name):
846
+ self.embed_positions.build(None)
847
+ if getattr(self, "layernorm_embedding", None) is not None:
848
+ with tf.name_scope(self.layernorm_embedding.name):
849
+ self.layernorm_embedding.build([None, None, self.embed_dim])
850
+ if getattr(self, "layers", None) is not None:
851
+ for layer in self.layers:
852
+ with tf.name_scope(layer.name):
853
+ layer.build(None)
854
+
855
+
856
+ @keras_serializable
857
+ class TFBlenderbotSmallDecoder(keras.layers.Layer):
858
+ config_class = BlenderbotSmallConfig
859
+ """
860
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotSmallDecoderLayer`]
861
+
862
+ Args:
863
+ config: BlenderbotSmallConfig
864
+ embed_tokens: output embedding
865
+ """
866
+
867
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
868
+ super().__init__(**kwargs)
869
+ self.config = config
870
+ self.padding_idx = config.pad_token_id
871
+ self.embed_tokens = embed_tokens
872
+ self.layerdrop = config.decoder_layerdrop
873
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
874
+ config.max_position_embeddings,
875
+ config.d_model,
876
+ name="embed_positions",
877
+ )
878
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
879
+ self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
880
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
881
+
882
+ self.dropout = keras.layers.Dropout(config.dropout)
883
+
884
+ def get_embed_tokens(self):
885
+ return self.embed_tokens
886
+
887
+ def set_embed_tokens(self, embed_tokens):
888
+ self.embed_tokens = embed_tokens
889
+
890
+ @unpack_inputs
891
+ def call(
892
+ self,
893
+ input_ids=None,
894
+ inputs_embeds=None,
895
+ attention_mask=None,
896
+ position_ids=None,
897
+ encoder_hidden_states=None,
898
+ encoder_attention_mask=None,
899
+ head_mask=None,
900
+ cross_attn_head_mask=None,
901
+ past_key_values=None,
902
+ use_cache=None,
903
+ output_attentions=None,
904
+ output_hidden_states=None,
905
+ return_dict=None,
906
+ training=False,
907
+ ):
908
+ r"""
909
+ Args:
910
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
911
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
912
+ provide it.
913
+
914
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
915
+ [`PreTrainedTokenizer.__call__`] for details.
916
+
917
+ [What are input IDs?](../glossary#input-ids)
918
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
919
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
920
+
921
+ - 1 for tokens that are **not masked**,
922
+ - 0 for tokens that are **masked**.
923
+
924
+ [What are attention masks?](../glossary#attention-mask)
925
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
926
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
927
+ range `[0, config.max_position_embeddings - 1]`.
928
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
929
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
930
+ of the decoder.
931
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
932
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
933
+ selected in `[0, 1]`:
934
+
935
+ - 1 for tokens that are **not masked**,
936
+ - 0 for tokens that are **masked**.
937
+
938
+ [What are attention masks?](../glossary#attention-mask)
939
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
940
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
941
+
942
+ - 1 indicates the head is **not masked**,
943
+ - 0 indicates the head is **masked**.
944
+
945
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
946
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
947
+
948
+ - 1 indicates the head is **not masked**,
949
+ - 0 indicates the head is **masked**.
950
+
951
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
952
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
953
+ decoding.
954
+
955
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
956
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
957
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
958
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
959
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
960
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
961
+ than the model's internal embedding lookup matrix.
962
+ output_attentions (`bool`, *optional*):
963
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
964
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
965
+ in the config will be used instead.
966
+ output_hidden_states (`bool`, *optional*):
967
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
968
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
969
+ will be used instead.
970
+ return_dict (`bool`, *optional*):
971
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
972
+ in eager mode, in graph mode the value will always be set to True.
973
+ training (`bool`, *optional*, defaults to `False`):
974
+ Whether or not to use the model in training mode (some modules like dropout modules have different
975
+ behaviors between training and evaluation).
976
+ """
977
+ if input_ids is not None and inputs_embeds is not None:
978
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
979
+ elif input_ids is not None:
980
+ input_shape = shape_list(input_ids)
981
+ elif inputs_embeds is not None:
982
+ input_shape = shape_list(inputs_embeds)[:-1]
983
+ else:
984
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
985
+
986
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
987
+
988
+ if inputs_embeds is None:
989
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
990
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
991
+
992
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
993
+ if input_shape[-1] > 1:
994
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
995
+ else:
996
+ combined_attention_mask = _expand_mask(
997
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
998
+ )
999
+
1000
+ if attention_mask is not None:
1001
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1002
+
1003
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1004
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1005
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1006
+
1007
+ # embed positions
1008
+ if position_ids is None:
1009
+ positions = self.embed_positions(input_shape, past_key_values_length)
1010
+ else:
1011
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
1012
+
1013
+ hidden_states = self.layernorm_embedding(inputs_embeds) + positions
1014
+ hidden_states = self.dropout(hidden_states, training=training)
1015
+
1016
+ # decoder layers
1017
+ all_hidden_states = () if output_hidden_states else None
1018
+ all_self_attns = () if output_attentions else None
1019
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1020
+ present_key_values = () if use_cache else None
1021
+
1022
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1023
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1024
+ if attn_mask is not None:
1025
+ tf.debugging.assert_equal(
1026
+ shape_list(attn_mask)[0],
1027
+ len(self.layers),
1028
+ message=(
1029
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1030
+ f" {shape_list(attn_mask)[0]}."
1031
+ ),
1032
+ )
1033
+
1034
+ for idx, decoder_layer in enumerate(self.layers):
1035
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1036
+ if output_hidden_states:
1037
+ all_hidden_states += (hidden_states,)
1038
+ dropout_probability = random.uniform(0, 1)
1039
+
1040
+ if training and (dropout_probability < self.layerdrop):
1041
+ continue
1042
+
1043
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1044
+
1045
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1046
+ hidden_states,
1047
+ attention_mask=combined_attention_mask,
1048
+ encoder_hidden_states=encoder_hidden_states,
1049
+ encoder_attention_mask=encoder_attention_mask,
1050
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1051
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1052
+ past_key_value=past_key_value,
1053
+ )
1054
+
1055
+ if use_cache:
1056
+ present_key_values += (present_key_value,)
1057
+
1058
+ if output_attentions:
1059
+ all_self_attns += (layer_self_attn,)
1060
+
1061
+ if encoder_hidden_states is not None:
1062
+ all_cross_attns += (layer_cross_attn,)
1063
+
1064
+ if output_hidden_states:
1065
+ all_hidden_states += (hidden_states,)
1066
+
1067
+ if not return_dict:
1068
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
1069
+ else:
1070
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1071
+ last_hidden_state=hidden_states,
1072
+ past_key_values=present_key_values,
1073
+ hidden_states=all_hidden_states,
1074
+ attentions=all_self_attns,
1075
+ cross_attentions=all_cross_attns,
1076
+ )
1077
+
1078
+ def build(self, input_shape=None):
1079
+ if self.built:
1080
+ return
1081
+ self.built = True
1082
+ if getattr(self, "embed_positions", None) is not None:
1083
+ with tf.name_scope(self.embed_positions.name):
1084
+ self.embed_positions.build(None)
1085
+ if getattr(self, "layernorm_embedding", None) is not None:
1086
+ with tf.name_scope(self.layernorm_embedding.name):
1087
+ self.layernorm_embedding.build([None, None, self.config.d_model])
1088
+ if getattr(self, "layers", None) is not None:
1089
+ for layer in self.layers:
1090
+ with tf.name_scope(layer.name):
1091
+ layer.build(None)
1092
+
1093
+
1094
+ @keras_serializable
1095
+ class TFBlenderbotSmallMainLayer(keras.layers.Layer):
1096
+ config_class = BlenderbotSmallConfig
1097
+
1098
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
1099
+ super().__init__(**kwargs)
1100
+
1101
+ self.config = config
1102
+ self.shared = keras.layers.Embedding(
1103
+ input_dim=config.vocab_size,
1104
+ output_dim=config.d_model,
1105
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
1106
+ name="model.shared",
1107
+ )
1108
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
1109
+ self.shared.load_weight_prefix = "model.shared"
1110
+
1111
+ self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder")
1112
+ self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder")
1113
+
1114
+ def get_input_embeddings(self):
1115
+ return self.shared
1116
+
1117
+ def set_input_embeddings(self, new_embeddings):
1118
+ self.shared = new_embeddings
1119
+ self.encoder.embed_tokens = self.shared
1120
+ self.decoder.embed_tokens = self.shared
1121
+
1122
+ @unpack_inputs
1123
+ def call(
1124
+ self,
1125
+ input_ids=None,
1126
+ attention_mask=None,
1127
+ decoder_input_ids=None,
1128
+ decoder_attention_mask=None,
1129
+ decoder_position_ids=None,
1130
+ head_mask=None,
1131
+ decoder_head_mask=None,
1132
+ cross_attn_head_mask=None,
1133
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1134
+ past_key_values=None,
1135
+ inputs_embeds=None,
1136
+ decoder_inputs_embeds=None,
1137
+ use_cache=None,
1138
+ output_attentions=None,
1139
+ output_hidden_states=None,
1140
+ return_dict=None,
1141
+ training=False,
1142
+ **kwargs,
1143
+ ):
1144
+ output_hidden_states = (
1145
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1146
+ )
1147
+
1148
+ if encoder_outputs is None:
1149
+ encoder_outputs = self.encoder(
1150
+ input_ids=input_ids,
1151
+ attention_mask=attention_mask,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ output_attentions=output_attentions,
1155
+ output_hidden_states=output_hidden_states,
1156
+ return_dict=return_dict,
1157
+ training=training,
1158
+ )
1159
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1160
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1161
+ encoder_outputs = TFBaseModelOutput(
1162
+ last_hidden_state=encoder_outputs[0],
1163
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1164
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1165
+ )
1166
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1167
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1168
+ encoder_outputs = encoder_outputs.to_tuple()
1169
+
1170
+ decoder_outputs = self.decoder(
1171
+ decoder_input_ids,
1172
+ attention_mask=decoder_attention_mask,
1173
+ position_ids=decoder_position_ids,
1174
+ encoder_hidden_states=encoder_outputs[0],
1175
+ encoder_attention_mask=attention_mask,
1176
+ head_mask=decoder_head_mask,
1177
+ cross_attn_head_mask=cross_attn_head_mask,
1178
+ past_key_values=past_key_values,
1179
+ inputs_embeds=decoder_inputs_embeds,
1180
+ use_cache=use_cache,
1181
+ output_attentions=output_attentions,
1182
+ output_hidden_states=output_hidden_states,
1183
+ return_dict=return_dict,
1184
+ training=training,
1185
+ )
1186
+
1187
+ if not return_dict:
1188
+ return decoder_outputs + encoder_outputs
1189
+
1190
+ return TFSeq2SeqModelOutput(
1191
+ last_hidden_state=decoder_outputs.last_hidden_state,
1192
+ past_key_values=decoder_outputs.past_key_values,
1193
+ decoder_hidden_states=decoder_outputs.hidden_states,
1194
+ decoder_attentions=decoder_outputs.attentions,
1195
+ cross_attentions=decoder_outputs.cross_attentions,
1196
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1197
+ encoder_hidden_states=encoder_outputs.hidden_states,
1198
+ encoder_attentions=encoder_outputs.attentions,
1199
+ )
1200
+
1201
+ def build(self, input_shape=None):
1202
+ if self.built:
1203
+ return
1204
+ self.built = True
1205
+ # The shared/tied weights expect to be in the model base namespace
1206
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
1207
+ # the current one.
1208
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
1209
+ self.shared.build(None)
1210
+ if getattr(self, "encoder", None) is not None:
1211
+ with tf.name_scope(self.encoder.name):
1212
+ self.encoder.build(None)
1213
+ if getattr(self, "decoder", None) is not None:
1214
+ with tf.name_scope(self.decoder.name):
1215
+ self.decoder.build(None)
1216
+
1217
+
1218
+ @add_start_docstrings(
1219
+ "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.",
1220
+ BLENDERBOT_SMALL_START_DOCSTRING,
1221
+ )
1222
+ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
1223
+ def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs):
1224
+ super().__init__(config, *inputs, **kwargs)
1225
+
1226
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
1227
+
1228
+ def get_encoder(self):
1229
+ return self.model.encoder
1230
+
1231
+ def get_decoder(self):
1232
+ return self.model.decoder
1233
+
1234
+ @unpack_inputs
1235
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1236
+ @add_code_sample_docstrings(
1237
+ checkpoint=_CHECKPOINT_FOR_DOC,
1238
+ output_type=TFSeq2SeqModelOutput,
1239
+ config_class=_CONFIG_FOR_DOC,
1240
+ )
1241
+ def call(
1242
+ self,
1243
+ input_ids: tf.Tensor | None = None,
1244
+ attention_mask: tf.Tensor | None = None,
1245
+ decoder_input_ids: tf.Tensor | None = None,
1246
+ decoder_attention_mask: tf.Tensor | None = None,
1247
+ decoder_position_ids: tf.Tensor | None = None,
1248
+ head_mask: tf.Tensor | None = None,
1249
+ decoder_head_mask: tf.Tensor | None = None,
1250
+ cross_attn_head_mask: tf.Tensor | None = None,
1251
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1252
+ past_key_values: List[tf.Tensor] | None = None,
1253
+ inputs_embeds: tf.Tensor | None = None,
1254
+ decoder_inputs_embeds: tf.Tensor | None = None,
1255
+ use_cache: Optional[bool] = None,
1256
+ output_attentions: Optional[bool] = None,
1257
+ output_hidden_states: Optional[bool] = None,
1258
+ return_dict: Optional[bool] = None,
1259
+ training: Optional[bool] = False,
1260
+ **kwargs,
1261
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
1262
+ outputs = self.model(
1263
+ input_ids=input_ids,
1264
+ attention_mask=attention_mask,
1265
+ decoder_input_ids=decoder_input_ids,
1266
+ decoder_attention_mask=decoder_attention_mask,
1267
+ decoder_position_ids=decoder_position_ids,
1268
+ head_mask=head_mask,
1269
+ decoder_head_mask=decoder_head_mask,
1270
+ cross_attn_head_mask=cross_attn_head_mask,
1271
+ encoder_outputs=encoder_outputs,
1272
+ past_key_values=past_key_values,
1273
+ inputs_embeds=inputs_embeds,
1274
+ decoder_inputs_embeds=decoder_inputs_embeds,
1275
+ use_cache=use_cache,
1276
+ output_attentions=output_attentions,
1277
+ output_hidden_states=output_hidden_states,
1278
+ return_dict=return_dict,
1279
+ training=training,
1280
+ )
1281
+
1282
+ return outputs
1283
+
1284
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
1285
+ def serving_output(self, output):
1286
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1287
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1288
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1289
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1290
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1291
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1292
+
1293
+ return TFSeq2SeqModelOutput(
1294
+ last_hidden_state=output.last_hidden_state,
1295
+ past_key_values=pkv,
1296
+ decoder_hidden_states=dec_hs,
1297
+ decoder_attentions=dec_attns,
1298
+ cross_attentions=cross_attns,
1299
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1300
+ encoder_hidden_states=enc_hs,
1301
+ encoder_attentions=enc_attns,
1302
+ )
1303
+
1304
+ def build(self, input_shape=None):
1305
+ if self.built:
1306
+ return
1307
+ self.built = True
1308
+ if getattr(self, "model", None) is not None:
1309
+ with tf.name_scope(self.model.name):
1310
+ self.model.build(None)
1311
+
1312
+
1313
+ # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
1314
+ class BiasLayer(keras.layers.Layer):
1315
+ """
1316
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
1317
+ so all weights have to be registered in a layer.
1318
+ """
1319
+
1320
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
1321
+ super().__init__(name=name, **kwargs)
1322
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
1323
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
1324
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
1325
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
1326
+
1327
+ def call(self, x):
1328
+ return x + self.bias
1329
+
1330
+
1331
+ @add_start_docstrings(
1332
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
1333
+ BLENDERBOT_SMALL_START_DOCSTRING,
1334
+ )
1335
+ class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss):
1336
+ _keys_to_ignore_on_load_unexpected = [
1337
+ r"model.encoder.embed_tokens.weight",
1338
+ r"model.decoder.embed_tokens.weight",
1339
+ ]
1340
+
1341
+ def __init__(self, config, *inputs, **kwargs):
1342
+ super().__init__(config, *inputs, **kwargs)
1343
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
1344
+ self.use_cache = config.use_cache
1345
+ # final_logits_bias is registered as a buffer in pytorch, so not trainable for the sake of consistency.
1346
+ self.bias_layer = BiasLayer(
1347
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
1348
+ )
1349
+
1350
+ def get_decoder(self):
1351
+ return self.model.decoder
1352
+
1353
+ def get_encoder(self):
1354
+ return self.model.encoder
1355
+
1356
+ def get_output_embeddings(self):
1357
+ return self.get_input_embeddings()
1358
+
1359
+ def set_output_embeddings(self, value):
1360
+ self.set_input_embeddings(value)
1361
+
1362
+ def get_bias(self):
1363
+ return {"final_logits_bias": self.bias_layer.bias}
1364
+
1365
+ def set_bias(self, value):
1366
+ # Replaces the existing layers containing bias for correct (de)serialization.
1367
+ vocab_size = value["final_logits_bias"].shape[-1]
1368
+ self.bias_layer = BiasLayer(
1369
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
1370
+ )
1371
+ self.bias_layer.bias.assign(value["final_logits_bias"])
1372
+
1373
+ @unpack_inputs
1374
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1375
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1376
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
1377
+ def call(
1378
+ self,
1379
+ input_ids: tf.Tensor | None = None,
1380
+ attention_mask: tf.Tensor | None = None,
1381
+ decoder_input_ids: tf.Tensor | None = None,
1382
+ decoder_attention_mask: tf.Tensor | None = None,
1383
+ decoder_position_ids: tf.Tensor | None = None,
1384
+ head_mask: tf.Tensor | None = None,
1385
+ decoder_head_mask: tf.Tensor | None = None,
1386
+ cross_attn_head_mask: tf.Tensor | None = None,
1387
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1388
+ past_key_values: List[tf.Tensor] | None = None,
1389
+ inputs_embeds: tf.Tensor | None = None,
1390
+ decoder_inputs_embeds: tf.Tensor | None = None,
1391
+ use_cache: Optional[bool] = None,
1392
+ output_attentions: Optional[bool] = None,
1393
+ output_hidden_states: Optional[bool] = None,
1394
+ return_dict: Optional[bool] = None,
1395
+ labels: tf.Tensor | None = None,
1396
+ training: Optional[bool] = False,
1397
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
1398
+ r"""
1399
+ labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
1400
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1401
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1402
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1403
+
1404
+ Returns:
1405
+
1406
+ """
1407
+
1408
+ if labels is not None:
1409
+ labels = tf.where(
1410
+ labels == self.config.pad_token_id,
1411
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
1412
+ labels,
1413
+ )
1414
+ use_cache = False
1415
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1416
+ decoder_input_ids = shift_tokens_right(
1417
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1418
+ )
1419
+
1420
+ outputs = self.model(
1421
+ input_ids,
1422
+ attention_mask=attention_mask,
1423
+ decoder_input_ids=decoder_input_ids,
1424
+ decoder_attention_mask=decoder_attention_mask,
1425
+ decoder_position_ids=decoder_position_ids,
1426
+ head_mask=head_mask,
1427
+ decoder_head_mask=decoder_head_mask,
1428
+ cross_attn_head_mask=cross_attn_head_mask,
1429
+ encoder_outputs=encoder_outputs,
1430
+ past_key_values=past_key_values,
1431
+ inputs_embeds=inputs_embeds,
1432
+ decoder_inputs_embeds=decoder_inputs_embeds,
1433
+ use_cache=use_cache,
1434
+ output_attentions=output_attentions,
1435
+ output_hidden_states=output_hidden_states,
1436
+ return_dict=return_dict,
1437
+ training=training,
1438
+ )
1439
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
1440
+ lm_logits = self.bias_layer(lm_logits)
1441
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1442
+
1443
+ if not return_dict:
1444
+ output = (lm_logits,) + outputs[1:]
1445
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1446
+ return TFSeq2SeqLMOutput(
1447
+ loss=masked_lm_loss,
1448
+ logits=lm_logits,
1449
+ past_key_values=outputs.past_key_values, # index 1 of d outputs
1450
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
1451
+ decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
1452
+ cross_attentions=outputs.cross_attentions, # index 4 of d outputs
1453
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
1454
+ encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
1455
+ encoder_attentions=outputs.encoder_attentions, # 2 of e out
1456
+ )
1457
+
1458
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
1459
+ def serving_output(self, output):
1460
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1461
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1462
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1463
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1464
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1465
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1466
+
1467
+ return TFSeq2SeqLMOutput(
1468
+ logits=output.logits,
1469
+ past_key_values=pkv,
1470
+ decoder_hidden_states=dec_hs,
1471
+ decoder_attentions=dec_attns,
1472
+ cross_attentions=cross_attns,
1473
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1474
+ encoder_hidden_states=enc_hs,
1475
+ encoder_attentions=enc_attns,
1476
+ )
1477
+
1478
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
1479
+ def prepare_inputs_for_generation(
1480
+ self,
1481
+ decoder_input_ids,
1482
+ past_key_values=None,
1483
+ attention_mask=None,
1484
+ decoder_attention_mask=None,
1485
+ head_mask=None,
1486
+ decoder_head_mask=None,
1487
+ cross_attn_head_mask=None,
1488
+ use_cache=None,
1489
+ encoder_outputs=None,
1490
+ **kwargs,
1491
+ ):
1492
+ # cut decoder_input_ids if past_key_values is used
1493
+ if past_key_values is not None:
1494
+ decoder_input_ids = decoder_input_ids[:, -1:]
1495
+
1496
+ if decoder_attention_mask is not None: # xla
1497
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
1498
+ elif past_key_values is not None: # no xla + past_key_values
1499
+ decoder_position_ids = past_key_values[0][0].shape[2]
1500
+ else: # no xla + no past_key_values
1501
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
1502
+
1503
+ return {
1504
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1505
+ "encoder_outputs": encoder_outputs,
1506
+ "past_key_values": past_key_values,
1507
+ "decoder_input_ids": decoder_input_ids,
1508
+ "attention_mask": attention_mask,
1509
+ "decoder_attention_mask": decoder_attention_mask,
1510
+ "decoder_position_ids": decoder_position_ids,
1511
+ "head_mask": head_mask,
1512
+ "decoder_head_mask": decoder_head_mask,
1513
+ "cross_attn_head_mask": cross_attn_head_mask,
1514
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1515
+ }
1516
+
1517
+ def build(self, input_shape=None):
1518
+ if self.built:
1519
+ return
1520
+ self.built = True
1521
+ if getattr(self, "model", None) is not None:
1522
+ with tf.name_scope(self.model.name):
1523
+ self.model.build(None)
1524
+ if getattr(self, "bias_layer", None) is not None:
1525
+ with tf.name_scope(self.bias_layer.name):
1526
+ self.bias_layer.build(None)
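
As a quick orientation to the TF classes above (not part of the upstream file), here is a minimal usage sketch for `TFBlenderbotSmallForConditionalGeneration`. It assumes TensorFlow is installed and uses the public `facebook/blenderbot_small-90M` checkpoint; the checkpoint name is an assumption, not something pinned by this commit.

```python
# Minimal sketch (assumes TensorFlow and the facebook/blenderbot_small-90M checkpoint).
from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

# Encode a user utterance and let the encoder-decoder model generate a reply.
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
reply_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))
```

`generate` routes through `prepare_inputs_for_generation` above, so `past_key_values` are reused between decoding steps and only the last decoder token is fed back in.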
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py ADDED
@@ -0,0 +1,240 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for BlenderbotSmall."""
16
+
17
+ import json
18
+ import os
19
+ from typing import Dict, List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ "tokenizer_config_file": "tokenizer_config.json",
34
+ }
35
+
36
+
37
+ def get_pairs(word):
38
+ """
39
+ Return set of symbol pairs in a word.
40
+
41
+ Word is represented as tuple of symbols (symbols being variable-length strings).
42
+ """
43
+ pairs = set()
44
+ prev_char = word[0]
45
+ for char in word[1:]:
46
+ pairs.add((prev_char, char))
47
+ prev_char = char
48
+
49
+ pairs = set(pairs)
50
+ return pairs
51
+
52
+
53
+ class BlenderbotSmallTokenizer(PreTrainedTokenizer):
54
+ """
55
+ Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ the superclass for more information regarding methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ merges_file (`str`):
64
+ Path to the merges file.
65
+ bos_token (`str`, *optional*, defaults to `"__start__"`):
66
+ The beginning of sentence token.
67
+ eos_token (`str`, *optional*, defaults to `"__end__"`):
68
+ The end of sentence token.
69
+ unk_token (`str`, *optional*, defaults to `"__unk__"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ pad_token (`str`, *optional*, defaults to `"__null__"`):
73
+ The token used for padding, for example when batching sequences of different lengths.
74
+ kwargs (*optional*):
75
+ Additional keyword arguments passed along to [`PreTrainedTokenizer`]
76
+ """
77
+
78
+ vocab_files_names = VOCAB_FILES_NAMES
79
+ model_input_names = ["input_ids", "attention_mask"]
80
+
81
+ def __init__(
82
+ self,
83
+ vocab_file,
84
+ merges_file,
85
+ bos_token="__start__",
86
+ eos_token="__end__",
87
+ unk_token="__unk__",
88
+ pad_token="__null__",
89
+ **kwargs,
90
+ ):
91
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
92
+ self.encoder = json.load(vocab_handle)
93
+ self.decoder = {v: k for k, v in self.encoder.items()}
94
+ with open(merges_file, encoding="utf-8") as merges_handle:
95
+ merges = merges_handle.read().split("\n")[1:-1]
96
+ merges = [tuple(merge.split()) for merge in merges]
97
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
98
+ self.cache = {}
99
+ super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
100
+
101
+ @property
102
+ def vocab_size(self) -> int:
103
+ return len(self.encoder)
104
+
105
+ def get_vocab(self) -> Dict:
106
+ return dict(self.encoder, **self.added_tokens_encoder)
107
+
108
+ def bpe(self, token: str) -> str:
109
+ if token in self.cache:
110
+ return self.cache[token]
111
+ token = re.sub("([.,!?()])", r" \1", token)
112
+ token = re.sub("(')", r" \1 ", token)
113
+ token = re.sub(r"\s{2,}", " ", token)
114
+ if "\n" in token:
115
+ token = token.replace("\n", " __newln__")
116
+
117
+ tokens = token.split(" ")
118
+ words = []
119
+ for token in tokens:
120
+ if not len(token):
121
+ continue
122
+
123
+ token = token.lower()
124
+ word = tuple(token)
125
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
126
+ pairs = get_pairs(word)
127
+
128
+ if not pairs:
129
+ words.append(token)
130
+ continue
131
+
132
+ while True:
133
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
134
+ if bigram not in self.bpe_ranks:
135
+ break
136
+ first, second = bigram
137
+ new_word = []
138
+ i = 0
139
+
140
+ while i < len(word):
141
+ try:
142
+ j = word.index(first, i)
143
+ new_word.extend(word[i:j])
144
+ i = j
145
+ except ValueError:
146
+ new_word.extend(word[i:])
147
+ break
148
+
149
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
150
+ new_word.append(first + second)
151
+ i += 2
152
+ else:
153
+ new_word.append(word[i])
154
+ i += 1
155
+ new_word = tuple(new_word)
156
+ word = new_word
157
+ if len(word) == 1:
158
+ break
159
+ else:
160
+ pairs = get_pairs(word)
161
+ word = "@@ ".join(word)
162
+ word = word[:-4]
163
+
164
+ self.cache[token] = word
165
+ words.append(word)
166
+ return " ".join(words)
167
+
168
+ def _tokenize(self, text: str) -> List[str]:
169
+ """Split a string into tokens using BPE."""
170
+ split_tokens = []
171
+
172
+ words = re.findall(r"\S+\n?", text)
173
+
174
+ for token in words:
175
+ split_tokens.extend(list(self.bpe(token).split(" ")))
176
+ return split_tokens
177
+
178
+ def _convert_token_to_id(self, token: str) -> int:
179
+ """Converts a token to an id using the vocab."""
180
+ token = token.lower()
181
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
182
+
183
+ def _convert_id_to_token(self, index: int) -> str:
184
+ """Converts an index (integer) in a token (str) using the vocab."""
185
+ return self.decoder.get(index, self.unk_token)
186
+
187
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
188
+ """Converts a sequence of tokens in a single string."""
189
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
190
+ return out_string
191
+
192
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
193
+ if not os.path.isdir(save_directory):
194
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
195
+ return
196
+ vocab_file = os.path.join(
197
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
198
+ )
199
+ merge_file = os.path.join(
200
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
201
+ )
202
+
203
+ with open(vocab_file, "w", encoding="utf-8") as f:
204
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
205
+
206
+ index = 0
207
+ with open(merge_file, "w", encoding="utf-8") as writer:
208
+ writer.write("#version: 0.2\n")
209
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
210
+ if index != token_index:
211
+ logger.warning(
212
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
213
+ " Please check that the tokenizer is not corrupted!"
214
+ )
215
+ index = token_index
216
+ writer.write(" ".join(bpe_tokens) + "\n")
217
+ index += 1
218
+
219
+ return vocab_file, merge_file
220
+
221
+ @property
222
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
223
+ def default_chat_template(self):
224
+ """
225
+ A very simple chat template that just adds whitespace between messages.
226
+ """
227
+ logger.warning_once(
228
+ "\nNo chat template is defined for this tokenizer - using the default template "
229
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
230
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
231
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
232
+ )
233
+ return (
234
+ "{% for message in messages %}"
235
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
236
+ "{{ message['content'] }}"
237
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
238
+ "{% endfor %}"
239
+ "{{ eos_token }}"
240
+ )
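
To make the BPE logic above concrete, the following is a small, hedged sketch of how the slow tokenizer behaves end to end: `_tokenize` lowercases words and splits them into subword pieces whose non-final parts carry a trailing `@@`, and `convert_tokens_to_string` strips those markers again. The checkpoint name is an assumption.

```python
# Sketch only; assumes vocab.json/merges.txt from facebook/blenderbot_small-90M are available.
from transformers import BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")

tokens = tokenizer.tokenize("Sample text, with punctuation!")
print(tokens)  # lowercased subword pieces; non-final pieces end in "@@"

# Round-trip: ids come from the vocab (unknown pieces fall back to __unk__),
# and convert_tokens_to_string removes the "@@ " continuation markers.
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.convert_tokens_to_string(tokens))
```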
venv/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py ADDED
@@ -0,0 +1,120 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast tokenization class for BlenderbotSmall."""
16
+ from typing import List, Optional
17
+
18
+ from tokenizers import ByteLevelBPETokenizer
19
+
20
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
21
+ from ...utils import logging
22
+ from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {
28
+ "vocab_file": "vocab.json",
29
+ "merges_file": "merges.txt",
30
+ "tokenizer_config_file": "tokenizer_config.json",
31
+ }
32
+
33
+
34
+ class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
35
+ """
36
+ Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
37
+
38
+ Args:
39
+ vocab_file (`str`):
40
+ Path to the vocabulary file.
41
+ """
42
+
43
+ vocab_files_names = VOCAB_FILES_NAMES
44
+ slow_tokenizer_class = BlenderbotSmallTokenizer
45
+
46
+ def __init__(
47
+ self,
48
+ vocab_file=None,
49
+ merges_file=None,
50
+ unk_token="<|endoftext|>",
51
+ bos_token="<|endoftext|>",
52
+ eos_token="<|endoftext|>",
53
+ add_prefix_space=False,
54
+ trim_offsets=True,
55
+ **kwargs,
56
+ ):
57
+ super().__init__(
58
+ ByteLevelBPETokenizer(
59
+ vocab=vocab_file,
60
+ merges=merges_file,
61
+ add_prefix_space=add_prefix_space,
62
+ trim_offsets=trim_offsets,
63
+ ),
64
+ bos_token=bos_token,
65
+ eos_token=eos_token,
66
+ unk_token=unk_token,
67
+ **kwargs,
68
+ )
69
+ self.add_prefix_space = add_prefix_space
70
+
71
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
72
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
73
+ if token_ids_1 is None:
74
+ return output
75
+
76
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
77
+
78
+ def create_token_type_ids_from_sequences(
79
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
80
+ ) -> List[int]:
81
+ """
82
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall
83
+ does not make use of token type ids, therefore a list of zeros is returned.
84
+
85
+ Args:
86
+ token_ids_0 (`List[int]`):
87
+ List of IDs.
88
+ token_ids_1 (`List[int]`, *optional*):
89
+ Optional second list of IDs for sequence pairs.
90
+
91
+ Returns:
92
+ `List[int]`: List of zeros.
93
+ """
94
+ sep = [self.sep_token_id]
95
+ cls = [self.cls_token_id]
96
+
97
+ if token_ids_1 is None:
98
+ return len(cls + token_ids_0 + sep) * [0]
99
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
100
+
101
+ @property
102
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
103
+ def default_chat_template(self):
104
+ """
105
+ A very simple chat template that just adds whitespace between messages.
106
+ """
107
+ logger.warning_once(
108
+ "\nNo chat template is defined for this tokenizer - using the default template "
109
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
110
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
111
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
112
+ )
113
+ return (
114
+ "{% for message in messages %}"
115
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
116
+ "{{ message['content'] }}"
117
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
118
+ "{% endfor %}"
119
+ "{{ eos_token }}"
120
+ )
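
For the two helper methods above, a short hedged illustration (again assuming the same `facebook/blenderbot_small-90M` files work with the fast tokenizer): `build_inputs_with_special_tokens` wraps a single sequence as `<bos> A <eos>` and a pair as `<bos> A <eos> <eos> B <eos>`, while `create_token_type_ids_from_sequences` always returns zeros because BlenderbotSmall does not use token type ids.

```python
# Sketch; the checkpoint name is an assumption.
from transformers import BlenderbotSmallTokenizerFast

tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")

ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello there"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("general kenobi"))

print(tok.build_inputs_with_special_tokens(ids_a))             # [bos] + A + [eos]
print(tok.build_inputs_with_special_tokens(ids_a, ids_b))      # [bos] + A + [eos] + [eos] + B + [eos]
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # all zeros
```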
venv/lib/python3.10/site-packages/transformers/models/canine/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
21
+ "tokenization_canine": ["CanineTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_canine"] = [
31
+ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "CanineForMultipleChoice",
33
+ "CanineForQuestionAnswering",
34
+ "CanineForSequenceClassification",
35
+ "CanineForTokenClassification",
36
+ "CanineLayer",
37
+ "CanineModel",
38
+ "CaninePreTrainedModel",
39
+ "load_tf_weights_in_canine",
40
+ ]
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
45
+ from .tokenization_canine import CanineTokenizer
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .modeling_canine import (
54
+ CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
55
+ CanineForMultipleChoice,
56
+ CanineForQuestionAnswering,
57
+ CanineForSequenceClassification,
58
+ CanineForTokenClassification,
59
+ CanineLayer,
60
+ CanineModel,
61
+ CaninePreTrainedModel,
62
+ load_tf_weights_in_canine,
63
+ )
64
+
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
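
The `__init__.py` above registers CANINE with the library's lazy-import machinery: importing the package is cheap, and the torch-backed classes in `modeling_canine` are only loaded when first accessed (or raise an informative error if torch is missing). A small sketch of what that means for a caller; note that `CanineTokenizer()` needs no vocabulary file because CANINE operates directly on Unicode characters, mirroring the conversion script further down.

```python
# Sketch: _LazyModule defers the heavy modeling import until an attribute is accessed.
from transformers.models.canine import CanineConfig, CanineTokenizer

config = CanineConfig()        # defaults mirror google/canine-s
tokenizer = CanineTokenizer()  # character-level: no vocabulary file required
print(config.model_type)                 # "canine"
print(tokenizer("hello")["input_ids"])   # Unicode codepoints wrapped in special-token codepoints
```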
venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc ADDED
Binary file (5.74 kB).
 
venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/convert_canine_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.37 kB).
 
venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/modeling_canine.cpython-310.pyc ADDED
Binary file (45.7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc ADDED
Binary file (7.82 kB).
 
venv/lib/python3.10/site-packages/transformers/models/canine/configuration_canine.py ADDED
@@ -0,0 +1,141 @@
1
+ # coding=utf-8
2
+ # Copyright Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CANINE model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class CanineConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`CanineModel`]. It is used to instantiate an
30
+ CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the CANINE
32
+ [google/canine-s](https://huggingface.co/google/canine-s) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ hidden_size (`int`, *optional*, defaults to 768):
40
+ Dimension of the encoder layers and the pooler layer.
41
+ num_hidden_layers (`int`, *optional*, defaults to 12):
42
+ Number of hidden layers in the deep Transformer encoder.
43
+ num_attention_heads (`int`, *optional*, defaults to 12):
44
+ Number of attention heads for each attention layer in the Transformer encoders.
45
+ intermediate_size (`int`, *optional*, defaults to 3072):
46
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoders.
47
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
48
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
49
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
50
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
51
+ The dropout probability for all fully connected layers in the embeddings, encoders, and pooler.
52
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
53
+ The dropout ratio for the attention probabilities.
54
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
55
+ The maximum sequence length that this model might ever be used with.
56
+ type_vocab_size (`int`, *optional*, defaults to 16):
57
+ The vocabulary size of the `token_type_ids` passed when calling [`CanineModel`].
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
61
+ The epsilon used by the layer normalization layers.
62
+ pad_token_id (`int`, *optional*, defaults to 0):
63
+ Padding token id.
64
+ bos_token_id (`int`, *optional*, defaults to 57344):
65
+ Beginning of stream token id.
66
+ eos_token_id (`int`, *optional*, defaults to 57345):
67
+ End of stream token id.
68
+ downsampling_rate (`int`, *optional*, defaults to 4):
69
+ The rate at which to downsample the original character sequence length before applying the deep Transformer
70
+ encoder.
71
+ upsampling_kernel_size (`int`, *optional*, defaults to 4):
72
+ The kernel size (i.e. the number of characters in each window) of the convolutional projection layer when
73
+ projecting back from `hidden_size`*2 to `hidden_size`.
74
+ num_hash_functions (`int`, *optional*, defaults to 8):
75
+ The number of hash functions to use. Each hash function has its own embedding matrix.
76
+ num_hash_buckets (`int`, *optional*, defaults to 16384):
77
+ The number of hash buckets to use.
78
+ local_transformer_stride (`int`, *optional*, defaults to 128):
79
+ The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good
80
+ TPU/XLA memory alignment.
81
+
82
+ Example:
83
+
84
+ ```python
85
+ >>> from transformers import CanineConfig, CanineModel
86
+
87
+ >>> # Initializing a CANINE google/canine-s style configuration
88
+ >>> configuration = CanineConfig()
89
+
90
+ >>> # Initializing a model (with random weights) from the google/canine-s style configuration
91
+ >>> model = CanineModel(configuration)
92
+
93
+ >>> # Accessing the model configuration
94
+ >>> configuration = model.config
95
+ ```"""
96
+
97
+ model_type = "canine"
98
+
99
+ def __init__(
100
+ self,
101
+ hidden_size=768,
102
+ num_hidden_layers=12,
103
+ num_attention_heads=12,
104
+ intermediate_size=3072,
105
+ hidden_act="gelu",
106
+ hidden_dropout_prob=0.1,
107
+ attention_probs_dropout_prob=0.1,
108
+ max_position_embeddings=16384,
109
+ type_vocab_size=16,
110
+ initializer_range=0.02,
111
+ layer_norm_eps=1e-12,
112
+ pad_token_id=0,
113
+ bos_token_id=0xE000,
114
+ eos_token_id=0xE001,
115
+ downsampling_rate=4,
116
+ upsampling_kernel_size=4,
117
+ num_hash_functions=8,
118
+ num_hash_buckets=16384,
119
+ local_transformer_stride=128, # Good TPU/XLA memory alignment.
120
+ **kwargs,
121
+ ):
122
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
123
+
124
+ self.max_position_embeddings = max_position_embeddings
125
+ self.hidden_size = hidden_size
126
+ self.num_hidden_layers = num_hidden_layers
127
+ self.num_attention_heads = num_attention_heads
128
+ self.intermediate_size = intermediate_size
129
+ self.hidden_act = hidden_act
130
+ self.hidden_dropout_prob = hidden_dropout_prob
131
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
132
+ self.initializer_range = initializer_range
133
+ self.type_vocab_size = type_vocab_size
134
+ self.layer_norm_eps = layer_norm_eps
135
+
136
+ # Character config:
137
+ self.downsampling_rate = downsampling_rate
138
+ self.upsampling_kernel_size = upsampling_kernel_size
139
+ self.num_hash_functions = num_hash_functions
140
+ self.num_hash_buckets = num_hash_buckets
141
+ self.local_transformer_stride = local_transformer_stride
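
One detail worth noting in the constructor above: `bos_token_id`/`eos_token_id` default to the Unicode private-use codepoints `0xE000`/`0xE001` (57344/57345 in decimal, matching the docstring), since CANINE's "vocabulary" is the codepoint space itself. A quick sketch:

```python
from transformers import CanineConfig

config = CanineConfig()
# Private-use-area codepoints double as special-token ids for the character-level model.
assert (config.bos_token_id, config.eos_token_id) == (0xE000, 0xE001) == (57344, 57345)
print(config.num_hash_functions, config.num_hash_buckets)  # 8 hash functions over 16384 buckets each
```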
venv/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,66 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert CANINE checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine
21
+ from transformers.utils import logging
22
+
23
+
24
+ logging.set_verbosity_info()
25
+
26
+
27
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path):
28
+ # Initialize PyTorch model
29
+ config = CanineConfig()
30
+ model = CanineModel(config)
31
+ model.eval()
32
+
33
+ print(f"Building PyTorch model from configuration: {config}")
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_canine(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model (weights and configuration)
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ model.save_pretrained(pytorch_dump_path)
41
+
42
+ # Save tokenizer files
43
+ tokenizer = CanineTokenizer()
44
+ print(f"Save tokenizer files to {pytorch_dump_path}")
45
+ tokenizer.save_pretrained(pytorch_dump_path)
46
+
47
+
48
+ if __name__ == "__main__":
49
+ parser = argparse.ArgumentParser()
50
+ # Required parameters
51
+ parser.add_argument(
52
+ "--tf_checkpoint_path",
53
+ default=None,
54
+ type=str,
55
+ required=True,
56
+ help="Path to the TensorFlow checkpoint. Should end with model.ckpt",
57
+ )
58
+ parser.add_argument(
59
+ "--pytorch_dump_path",
60
+ default=None,
61
+ type=str,
62
+ required=True,
63
+ help="Path to a folder where the PyTorch model will be placed.",
64
+ )
65
+ args = parser.parse_args()
66
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path)
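
For reference, a hedged example of driving the conversion entry point above from Python rather than the CLI; both paths below are placeholders, not files that exist in this commit.

```python
# Sketch: programmatic equivalent of running the script with
# --tf_checkpoint_path and --pytorch_dump_path on the command line.
from transformers.models.canine.convert_canine_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="path/to/canine/model.ckpt",  # placeholder TF checkpoint prefix
    pytorch_dump_path="canine-s-pytorch",            # placeholder output folder
)
```

This loads the TF variables with `load_tf_weights_in_canine`, saves the PyTorch weights and config via `save_pretrained`, and writes the (file-less) `CanineTokenizer` configuration alongside them.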
venv/lib/python3.10/site-packages/transformers/models/canine/modeling_canine.py ADDED
@@ -0,0 +1,1645 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CANINE model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ import os
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutput,
32
+ ModelOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ add_code_sample_docstrings,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_canine import CanineConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "google/canine-s"
53
+ _CONFIG_FOR_DOC = "CanineConfig"
54
+
55
+
56
+ from ..deprecated._archive_maps import CANINE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ # Support up to 16 hash functions.
60
+ _PRIMES = [31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223]
61
+
62
+
63
+ @dataclass
64
+ class CanineModelOutputWithPooling(ModelOutput):
65
+ """
66
+ Output type of [`CanineModel`]. Based on [`~modeling_outputs.BaseModelOutputWithPooling`], but with slightly
67
+ different `hidden_states` and `attentions`, as these also include the hidden states and attentions of the shallow
68
+ Transformer encoders.
69
+
70
+ Args:
71
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
72
+ Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final
73
+ shallow Transformer encoder).
74
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
75
+ Hidden-state of the first token of the sequence (classification token) at the last layer of the deep
76
+ Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer
77
+ weights are trained from the next sentence prediction (classification) objective during pretraining.
78
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
79
+ Tuple of `torch.FloatTensor` (one for the input to each encoder + one for the output of each layer of each
80
+ encoder) of shape `(batch_size, sequence_length, hidden_size)` and `(batch_size, sequence_length //
81
+ config.downsampling_rate, hidden_size)`. Hidden-states of the model at the output of each layer plus the
82
+ initial input to each Transformer encoder. The hidden states of the shallow encoders have length
83
+ `sequence_length`, but the hidden states of the deep encoder have length `sequence_length` //
84
+ `config.downsampling_rate`.
85
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
86
+ Tuple of `torch.FloatTensor` (one for each layer) of the 3 Transformer encoders of shape `(batch_size,
87
+ num_heads, sequence_length, sequence_length)` and `(batch_size, num_heads, sequence_length //
88
+ config.downsampling_rate, sequence_length // config.downsampling_rate)`. Attentions weights after the
89
+ attention softmax, used to compute the weighted average in the self-attention heads.
90
+ """
91
+
92
+ last_hidden_state: torch.FloatTensor = None
93
+ pooler_output: torch.FloatTensor = None
94
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
95
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
96
+
97
+
98
+ def load_tf_weights_in_canine(model, config, tf_checkpoint_path):
99
+ """Load tf checkpoints in a pytorch model."""
100
+ try:
101
+ import re
102
+
103
+ import numpy as np
104
+ import tensorflow as tf
105
+ except ImportError:
106
+ logger.error(
107
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
108
+ "https://www.tensorflow.org/install/ for installation instructions."
109
+ )
110
+ raise
111
+ tf_path = os.path.abspath(tf_checkpoint_path)
112
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
113
+ # Load weights from TF model
114
+ init_vars = tf.train.list_variables(tf_path)
115
+ names = []
116
+ arrays = []
117
+ for name, shape in init_vars:
118
+ logger.info(f"Loading TF weight {name} with shape {shape}")
119
+ array = tf.train.load_variable(tf_path, name)
120
+ names.append(name)
121
+ arrays.append(array)
122
+
123
+ for name, array in zip(names, arrays):
124
+ name = name.split("/")
125
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
126
+ # which are not required for using pretrained model
127
+ # also discard the cls weights (which were used for the next sentence prediction pre-training task)
128
+ if any(
129
+ n
130
+ in [
131
+ "adam_v",
132
+ "adam_m",
133
+ "AdamWeightDecayOptimizer",
134
+ "AdamWeightDecayOptimizer_1",
135
+ "global_step",
136
+ "cls",
137
+ "autoregressive_decoder",
138
+ "char_output_weights",
139
+ ]
140
+ for n in name
141
+ ):
142
+ logger.info(f"Skipping {'/'.join(name)}")
143
+ continue
144
+ # if first scope name starts with "bert", change it to "encoder"
145
+ if name[0] == "bert":
146
+ name[0] = "encoder"
147
+ # remove "embeddings" middle name of HashBucketCodepointEmbedders
148
+ elif name[1] == "embeddings":
149
+ name.remove(name[1])
150
+ # rename segment_embeddings to token_type_embeddings
151
+ elif name[1] == "segment_embeddings":
152
+ name[1] = "token_type_embeddings"
153
+ # rename initial convolutional projection layer
154
+ elif name[1] == "initial_char_encoder":
155
+ name = ["chars_to_molecules"] + name[-2:]
156
+ # rename final convolutional projection layer
157
+ elif name[0] == "final_char_encoder" and name[1] in ["LayerNorm", "conv"]:
158
+ name = ["projection"] + name[1:]
159
+ pointer = model
160
+ for m_name in name:
161
+ if (re.fullmatch(r"[A-Za-z]+_\d+", m_name)) and "Embedder" not in m_name:
162
+ scope_names = re.split(r"_(\d+)", m_name)
163
+ else:
164
+ scope_names = [m_name]
165
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
166
+ pointer = getattr(pointer, "weight")
167
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
168
+ pointer = getattr(pointer, "bias")
169
+ elif scope_names[0] == "output_weights":
170
+ pointer = getattr(pointer, "weight")
171
+ else:
172
+ try:
173
+ pointer = getattr(pointer, scope_names[0])
174
+ except AttributeError:
175
+ logger.info(f"Skipping {'/'.join(name)}")
176
+ continue
177
+ if len(scope_names) >= 2:
178
+ num = int(scope_names[1])
179
+ pointer = pointer[num]
180
+ if m_name[-11:] == "_embeddings":
181
+ pointer = getattr(pointer, "weight")
182
+ elif m_name[-10:] in [f"Embedder_{i}" for i in range(8)]:
183
+ pointer = getattr(pointer, "weight")
184
+ elif m_name == "kernel":
185
+ array = np.transpose(array)
186
+
187
+ if pointer.shape != array.shape:
188
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
189
+
190
+ logger.info(f"Initialize PyTorch weight {name}")
191
+ pointer.data = torch.from_numpy(array)
192
+ return model
193
+
194
+
195
+ class CanineEmbeddings(nn.Module):
196
+ """Construct the character, position and token_type embeddings."""
197
+
198
+ def __init__(self, config):
199
+ super().__init__()
200
+
201
+ self.config = config
202
+
203
+ # character embeddings
204
+ shard_embedding_size = config.hidden_size // config.num_hash_functions
205
+ for i in range(config.num_hash_functions):
206
+ name = f"HashBucketCodepointEmbedder_{i}"
207
+ setattr(self, name, nn.Embedding(config.num_hash_buckets, shard_embedding_size))
208
+ self.char_position_embeddings = nn.Embedding(config.num_hash_buckets, config.hidden_size)
209
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
210
+
211
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
212
+ # any TensorFlow checkpoint file
213
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
214
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
215
+
216
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
217
+ self.register_buffer(
218
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
219
+ )
220
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
221
+
222
+ def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int):
223
+ """
224
+ Converts ids to hash bucket ids via multiple hashing.
225
+
226
+ Args:
227
+ input_ids: The codepoints or other IDs to be hashed.
228
+ num_hashes: The number of hash functions to use.
229
+ num_buckets: The number of hash buckets (i.e. embeddings in each table).
230
+
231
+ Returns:
232
+ A list of tensors, each of which is the hash bucket IDs from one hash function.
233
+ """
234
+ if num_hashes > len(_PRIMES):
235
+ raise ValueError(f"`num_hashes` must be <= {len(_PRIMES)}")
236
+
237
+ primes = _PRIMES[:num_hashes]
238
+
239
+ result_tensors = []
240
+ for prime in primes:
241
+ hashed = ((input_ids + 1) * prime) % num_buckets
242
+ result_tensors.append(hashed)
243
+ return result_tensors
244
+
245
+ def _embed_hash_buckets(self, input_ids, embedding_size: int, num_hashes: int, num_buckets: int):
246
+ """Converts IDs (e.g. codepoints) into embeddings via multiple hashing."""
247
+ if embedding_size % num_hashes != 0:
248
+ raise ValueError(f"Expected `embedding_size` ({embedding_size}) % `num_hashes` ({num_hashes}) == 0")
249
+
250
+ hash_bucket_tensors = self._hash_bucket_tensors(input_ids, num_hashes=num_hashes, num_buckets=num_buckets)
251
+ embedding_shards = []
252
+ for i, hash_bucket_ids in enumerate(hash_bucket_tensors):
253
+ name = f"HashBucketCodepointEmbedder_{i}"
254
+ shard_embeddings = getattr(self, name)(hash_bucket_ids)
255
+ embedding_shards.append(shard_embeddings)
256
+
257
+ return torch.cat(embedding_shards, dim=-1)
258
+
259
+ def forward(
260
+ self,
261
+ input_ids: Optional[torch.LongTensor] = None,
262
+ token_type_ids: Optional[torch.LongTensor] = None,
263
+ position_ids: Optional[torch.LongTensor] = None,
264
+ inputs_embeds: Optional[torch.FloatTensor] = None,
265
+ ) -> torch.FloatTensor:
266
+ if input_ids is not None:
267
+ input_shape = input_ids.size()
268
+ else:
269
+ input_shape = inputs_embeds.size()[:-1]
270
+
271
+ seq_length = input_shape[1]
272
+
273
+ if position_ids is None:
274
+ position_ids = self.position_ids[:, :seq_length]
275
+
276
+ if token_type_ids is None:
277
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
278
+
279
+ if inputs_embeds is None:
280
+ inputs_embeds = self._embed_hash_buckets(
281
+ input_ids, self.config.hidden_size, self.config.num_hash_functions, self.config.num_hash_buckets
282
+ )
283
+
284
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
285
+
286
+ embeddings = inputs_embeds + token_type_embeddings
287
+
288
+ if self.position_embedding_type == "absolute":
289
+ position_embeddings = self.char_position_embeddings(position_ids)
290
+ embeddings += position_embeddings
291
+ embeddings = self.LayerNorm(embeddings)
292
+ embeddings = self.dropout(embeddings)
293
+ return embeddings
294
+
295
+
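Rather than allocating an embedding row for every Unicode codepoint, `CanineEmbeddings` hashes each codepoint into `num_hash_buckets` buckets with several hash functions and concatenates one small embedding shard per hash. A standalone sketch of that idea; the primes and sizes below are illustrative stand-ins, not the model's configured values:

```python
import torch
import torch.nn as nn

primes = [31, 43, 59]            # illustrative primes, one per hash function
num_buckets, shard_size = 16, 4  # illustrative sizes; the model reads these from its config

shards = nn.ModuleList([nn.Embedding(num_buckets, shard_size) for _ in primes])

codepoints = torch.tensor([[ord(c) for c in "héllo"]])  # (batch=1, char_seq=5)
embeddings = torch.cat(
    [shard(((codepoints + 1) * p) % num_buckets) for shard, p in zip(shards, primes)],
    dim=-1,
)
print(embeddings.shape)  # torch.Size([1, 5, 12]) == (batch, char_seq, len(primes) * shard_size)
```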
296
+ class CharactersToMolecules(nn.Module):
297
+ """Convert character sequence to initial molecule sequence (i.e. downsample) using strided convolutions."""
298
+
299
+ def __init__(self, config):
300
+ super().__init__()
301
+
302
+ self.conv = nn.Conv1d(
303
+ in_channels=config.hidden_size,
304
+ out_channels=config.hidden_size,
305
+ kernel_size=config.downsampling_rate,
306
+ stride=config.downsampling_rate,
307
+ )
308
+ self.activation = ACT2FN[config.hidden_act]
309
+
310
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
311
+ # any TensorFlow checkpoint file
312
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
313
+
314
+ def forward(self, char_encoding: torch.Tensor) -> torch.Tensor:
315
+ # `cls_encoding`: [batch, 1, hidden_size]
316
+ cls_encoding = char_encoding[:, 0:1, :]
317
+
318
+ # char_encoding has shape [batch, char_seq, hidden_size]
319
+ # We transpose it to be [batch, hidden_size, char_seq]
320
+ char_encoding = torch.transpose(char_encoding, 1, 2)
321
+ downsampled = self.conv(char_encoding)
322
+ downsampled = torch.transpose(downsampled, 1, 2)
323
+ downsampled = self.activation(downsampled)
324
+
325
+ # Truncate the last molecule in order to reserve a position for [CLS].
326
+ # Often, the last position is never used (unless we completely fill the
327
+ # text buffer). This is important in order to maintain alignment on TPUs
328
+ # (i.e. a multiple of 128).
329
+ downsampled_truncated = downsampled[:, 0:-1, :]
330
+
331
+ # We also keep [CLS] as a separate sequence position since we always
332
+ # want to reserve a position (and the model capacity that goes along
333
+ # with that) in the deep BERT stack.
334
+ # `result`: [batch, molecule_seq, molecule_dim]
335
+ result = torch.cat([cls_encoding, downsampled_truncated], dim=1)
336
+
337
+ result = self.LayerNorm(result)
338
+
339
+ return result
340
+
341
+
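The strided convolution above shrinks the character sequence by `downsampling_rate`; the last molecule is then dropped and the separately kept [CLS] encoding is prepended, so the molecule sequence length stays `char_seq // rate`. A shape-only sketch under assumed sizes:

```python
import torch
import torch.nn as nn

hidden, rate = 8, 4  # illustrative sizes
conv = nn.Conv1d(hidden, hidden, kernel_size=rate, stride=rate)

char_encoding = torch.randn(2, 16, hidden)                    # (batch, char_seq=16, hidden)
cls_encoding = char_encoding[:, 0:1, :]                       # keep [CLS] aside
downsampled = conv(char_encoding.transpose(1, 2)).transpose(1, 2)
print(downsampled.shape)                                      # (2, 4, 8): 16 chars -> 16 // 4 molecules
molecules = torch.cat([cls_encoding, downsampled[:, :-1, :]], dim=1)
print(molecules.shape)                                        # (2, 4, 8): last molecule dropped, [CLS] prepended
```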
342
+ class ConvProjection(nn.Module):
343
+ """
344
+ Project representations from hidden_size*2 back to hidden_size across a window of w = config.upsampling_kernel_size
345
+ characters.
346
+ """
347
+
348
+ def __init__(self, config):
349
+ super().__init__()
350
+ self.config = config
351
+ self.conv = nn.Conv1d(
352
+ in_channels=config.hidden_size * 2,
353
+ out_channels=config.hidden_size,
354
+ kernel_size=config.upsampling_kernel_size,
355
+ stride=1,
356
+ )
357
+ self.activation = ACT2FN[config.hidden_act]
358
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
359
+ # any TensorFlow checkpoint file
360
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
361
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
362
+
363
+ def forward(
364
+ self,
365
+ inputs: torch.Tensor,
366
+ final_seq_char_positions: Optional[torch.Tensor] = None,
367
+ ) -> torch.Tensor:
368
+ # inputs has shape [batch, mol_seq, molecule_hidden_size+char_hidden_final]
369
+ # we transpose it to be [batch, molecule_hidden_size+char_hidden_final, mol_seq]
370
+ inputs = torch.transpose(inputs, 1, 2)
371
+
372
+ # PyTorch < 1.9 does not support padding="same" (which is used in the original implementation),
373
+ # so we pad the tensor manually before passing it to the conv layer
374
+ # based on https://github.com/google-research/big_transfer/blob/49afe42338b62af9fbe18f0258197a33ee578a6b/bit_tf2/models.py#L36-L38
375
+ pad_total = self.config.upsampling_kernel_size - 1
376
+ pad_beg = pad_total // 2
377
+ pad_end = pad_total - pad_beg
378
+
379
+ pad = nn.ConstantPad1d((pad_beg, pad_end), 0)
380
+ # `result`: shape (batch_size, char_seq_len, hidden_size)
381
+ result = self.conv(pad(inputs))
382
+ result = torch.transpose(result, 1, 2)
383
+ result = self.activation(result)
384
+ result = self.LayerNorm(result)
385
+ result = self.dropout(result)
386
+ final_char_seq = result
387
+
388
+ if final_seq_char_positions is not None:
389
+ # Limit transformer query seq and attention mask to these character
390
+ # positions to greatly reduce the compute cost. Typically, this is just
391
+ # done for the MLM training task.
392
+ # TODO add support for MLM
393
+ raise NotImplementedError("CanineForMaskedLM is currently not supported")
394
+ else:
395
+ query_seq = final_char_seq
396
+
397
+ return query_seq
398
+
399
+
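Because older PyTorch releases lack `padding="same"` for `nn.Conv1d`, the projection pads manually so the convolution preserves the character sequence length. A quick check of that padding arithmetic with assumed sizes:

```python
import torch
import torch.nn as nn

kernel_size, hidden = 5, 8                                     # illustrative values
pad_total = kernel_size - 1                                    # 4
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2  # 2, 2

pad = nn.ConstantPad1d((pad_beg, pad_end), 0)
conv = nn.Conv1d(2 * hidden, hidden, kernel_size=kernel_size, stride=1)

inputs = torch.randn(2, 2 * hidden, 13)  # (batch, channels=hidden*2, char_seq=13)
print(conv(pad(inputs)).shape)           # torch.Size([2, 8, 13]) -- sequence length preserved
```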
400
+ class CanineSelfAttention(nn.Module):
401
+ def __init__(self, config):
402
+ super().__init__()
403
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
404
+ raise ValueError(
405
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
406
+ f"heads ({config.num_attention_heads})"
407
+ )
408
+
409
+ self.num_attention_heads = config.num_attention_heads
410
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
411
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
412
+
413
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
414
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
415
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
416
+
417
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
418
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
419
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
420
+ self.max_position_embeddings = config.max_position_embeddings
421
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
422
+
423
+ def transpose_for_scores(self, x):
424
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
425
+ x = x.view(*new_x_shape)
426
+ return x.permute(0, 2, 1, 3)
427
+
428
+ def forward(
429
+ self,
430
+ from_tensor: torch.Tensor,
431
+ to_tensor: torch.Tensor,
432
+ attention_mask: Optional[torch.FloatTensor] = None,
433
+ head_mask: Optional[torch.FloatTensor] = None,
434
+ output_attentions: Optional[bool] = False,
435
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
436
+ mixed_query_layer = self.query(from_tensor)
437
+
438
+ # If this is instantiated as a cross-attention module, the keys
439
+ # and values come from an encoder; the attention mask needs to be
440
+ # such that the encoder's padding tokens are not attended to.
441
+
442
+ key_layer = self.transpose_for_scores(self.key(to_tensor))
443
+ value_layer = self.transpose_for_scores(self.value(to_tensor))
444
+
445
+ query_layer = self.transpose_for_scores(mixed_query_layer)
446
+
447
+ # Take the dot product between "query" and "key" to get the raw attention scores.
448
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
449
+
450
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
451
+ seq_length = from_tensor.size()[1]
452
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1)
453
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1)
454
+ distance = position_ids_l - position_ids_r
455
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
456
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
457
+
458
+ if self.position_embedding_type == "relative_key":
459
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
460
+ attention_scores = attention_scores + relative_position_scores
461
+ elif self.position_embedding_type == "relative_key_query":
462
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
463
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
464
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
465
+
466
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
467
+ if attention_mask is not None:
468
+ if attention_mask.ndim == 3:
469
+ # if attention_mask is 3D (batch_size, from_seq, to_seq), add a broadcastable head dimension at dim=1
470
+ attention_mask = torch.unsqueeze(attention_mask, dim=1)
471
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
472
+ # masked positions, this operation will create a tensor which is 0.0 for
473
+ # positions we want to attend and the dtype's smallest value for masked positions.
474
+ attention_mask = (1.0 - attention_mask.float()) * torch.finfo(attention_scores.dtype).min
475
+ # Apply the attention mask (precomputed for all layers in CanineModel forward() function)
476
+ attention_scores = attention_scores + attention_mask
477
+
478
+ # Normalize the attention scores to probabilities.
479
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
480
+
481
+ # This is actually dropping out entire tokens to attend to, which might
482
+ # seem a bit unusual, but is taken from the original Transformer paper.
483
+ attention_probs = self.dropout(attention_probs)
484
+
485
+ # Mask heads if we want to
486
+ if head_mask is not None:
487
+ attention_probs = attention_probs * head_mask
488
+
489
+ context_layer = torch.matmul(attention_probs, value_layer)
490
+
491
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
492
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
493
+ context_layer = context_layer.view(*new_context_layer_shape)
494
+
495
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
496
+
497
+ return outputs
498
+
499
+
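The attention above folds padding into the scores additively: a 1/0 keep mask becomes 0 for kept positions and the dtype's most negative value for masked ones, so softmax drives masked probabilities to roughly zero. A minimal sketch of that conversion:

```python
import torch

scores = torch.zeros(1, 1, 1, 4)             # (batch, heads, from_seq, to_seq), all scores equal
keep = torch.tensor([[1.0, 1.0, 1.0, 0.0]])  # last to-position is padding
additive = (1.0 - keep) * torch.finfo(scores.dtype).min
probs = torch.softmax(scores + additive[:, None, None, :], dim=-1)
print(probs)  # ~[0.333, 0.333, 0.333, 0.000] -- the padded position receives no attention
```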
500
+ class CanineSelfOutput(nn.Module):
501
+ def __init__(self, config):
502
+ super().__init__()
503
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
504
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
505
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
506
+
507
+ def forward(
508
+ self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor
509
+ ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
510
+ hidden_states = self.dense(hidden_states)
511
+ hidden_states = self.dropout(hidden_states)
512
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
513
+ return hidden_states
514
+
515
+
516
+ class CanineAttention(nn.Module):
517
+ """
518
+ Additional arguments related to local attention:
519
+
520
+ - **local** (`bool`, *optional*, defaults to `False`) -- Whether to apply local attention.
521
+ - **always_attend_to_first_position** (`bool`, *optional*, defaults to `False`) -- Should all blocks be able to
+ attend to the `to_tensor`'s first position (e.g. a [CLS] position)?
+ - **first_position_attends_to_all** (`bool`, *optional*, defaults to `False`) -- Should the *from_tensor*'s first
+ position be able to attend to all positions within the *from_tensor*?
+ - **attend_from_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in
+ `from_tensor`.
+ - **attend_from_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to skip when moving
+ to the next block in `from_tensor`.
+ - **attend_to_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in
+ *to_tensor*.
+ - **attend_to_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to skip when moving to
+ the next block in *to_tensor*.
531
+ """
532
+
533
+ def __init__(
534
+ self,
535
+ config,
536
+ local=False,
537
+ always_attend_to_first_position: bool = False,
538
+ first_position_attends_to_all: bool = False,
539
+ attend_from_chunk_width: int = 128,
540
+ attend_from_chunk_stride: int = 128,
541
+ attend_to_chunk_width: int = 128,
542
+ attend_to_chunk_stride: int = 128,
543
+ ):
544
+ super().__init__()
545
+ self.self = CanineSelfAttention(config)
546
+ self.output = CanineSelfOutput(config)
547
+ self.pruned_heads = set()
548
+
549
+ # additional arguments related to local attention
550
+ self.local = local
551
+ if attend_from_chunk_width < attend_from_chunk_stride:
552
+ raise ValueError(
553
+ "`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped."
554
+ )
555
+ if attend_to_chunk_width < attend_to_chunk_stride:
556
+ raise ValueError(
557
+ "`attend_to_chunk_width` < `attend_to_chunk_stride`would cause sequence positions to get skipped."
558
+ )
559
+ self.always_attend_to_first_position = always_attend_to_first_position
560
+ self.first_position_attends_to_all = first_position_attends_to_all
561
+ self.attend_from_chunk_width = attend_from_chunk_width
562
+ self.attend_from_chunk_stride = attend_from_chunk_stride
563
+ self.attend_to_chunk_width = attend_to_chunk_width
564
+ self.attend_to_chunk_stride = attend_to_chunk_stride
565
+
566
+ def prune_heads(self, heads):
567
+ if len(heads) == 0:
568
+ return
569
+ heads, index = find_pruneable_heads_and_indices(
570
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
571
+ )
572
+
573
+ # Prune linear layers
574
+ self.self.query = prune_linear_layer(self.self.query, index)
575
+ self.self.key = prune_linear_layer(self.self.key, index)
576
+ self.self.value = prune_linear_layer(self.self.value, index)
577
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
578
+
579
+ # Update hyper params and store pruned heads
580
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
581
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
582
+ self.pruned_heads = self.pruned_heads.union(heads)
583
+
584
+ def forward(
585
+ self,
586
+ hidden_states: Tuple[torch.FloatTensor],
587
+ attention_mask: Optional[torch.FloatTensor] = None,
588
+ head_mask: Optional[torch.FloatTensor] = None,
589
+ output_attentions: Optional[bool] = False,
590
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
591
+ if not self.local:
592
+ self_outputs = self.self(hidden_states, hidden_states, attention_mask, head_mask, output_attentions)
593
+ attention_output = self_outputs[0]
594
+ else:
595
+ from_seq_length = to_seq_length = hidden_states.shape[1]
596
+ from_tensor = to_tensor = hidden_states
597
+
598
+ # Create chunks (windows) that we will attend *from* and then concatenate them.
599
+ from_chunks = []
600
+ if self.first_position_attends_to_all:
601
+ from_chunks.append((0, 1))
602
+ # We must skip this first position so that our output sequence is the
603
+ # correct length (this matters in the *from* sequence only).
604
+ from_start = 1
605
+ else:
606
+ from_start = 0
607
+ for chunk_start in range(from_start, from_seq_length, self.attend_from_chunk_stride):
608
+ chunk_end = min(from_seq_length, chunk_start + self.attend_from_chunk_width)
609
+ from_chunks.append((chunk_start, chunk_end))
610
+
611
+ # Determine the chunks (windows) that will attend *to*.
612
+ to_chunks = []
613
+ if self.first_position_attends_to_all:
614
+ to_chunks.append((0, to_seq_length))
615
+ for chunk_start in range(0, to_seq_length, self.attend_to_chunk_stride):
616
+ chunk_end = min(to_seq_length, chunk_start + self.attend_to_chunk_width)
617
+ to_chunks.append((chunk_start, chunk_end))
618
+
619
+ if len(from_chunks) != len(to_chunks):
620
+ raise ValueError(
621
+ f"Expected to have same number of `from_chunks` ({from_chunks}) and "
622
+ f"`to_chunks` ({from_chunks}). Check strides."
623
+ )
624
+
625
+ # next, compute attention scores for each pair of windows and concatenate
626
+ attention_output_chunks = []
627
+ attention_probs_chunks = []
628
+ for (from_start, from_end), (to_start, to_end) in zip(from_chunks, to_chunks):
629
+ from_tensor_chunk = from_tensor[:, from_start:from_end, :]
630
+ to_tensor_chunk = to_tensor[:, to_start:to_end, :]
631
+ # `attention_mask`: <float>[batch_size, from_seq, to_seq]
632
+ # `attention_mask_chunk`: <float>[batch_size, from_seq_chunk, to_seq_chunk]
633
+ attention_mask_chunk = attention_mask[:, from_start:from_end, to_start:to_end]
634
+ if self.always_attend_to_first_position:
635
+ cls_attention_mask = attention_mask[:, from_start:from_end, 0:1]
636
+ attention_mask_chunk = torch.cat([cls_attention_mask, attention_mask_chunk], dim=2)
637
+
638
+ cls_position = to_tensor[:, 0:1, :]
639
+ to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1)
640
+
641
+ attention_outputs_chunk = self.self(
642
+ from_tensor_chunk, to_tensor_chunk, attention_mask_chunk, head_mask, output_attentions
643
+ )
644
+ attention_output_chunks.append(attention_outputs_chunk[0])
645
+ if output_attentions:
646
+ attention_probs_chunks.append(attention_outputs_chunk[1])
647
+
648
+ attention_output = torch.cat(attention_output_chunks, dim=1)
649
+
650
+ attention_output = self.output(attention_output, hidden_states)
651
+ outputs = (attention_output,)
652
+ if not self.local:
653
+ outputs = outputs + self_outputs[1:] # add attentions if we output them
654
+ else:
655
+ outputs = outputs + tuple(attention_probs_chunks) # add attentions if we output them
656
+ return outputs
657
+
658
+
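For local attention, the module enumerates fixed-width blocks over the *from* and *to* sequences and runs attention chunk by chunk; with equal width and stride the two chunk lists line up one-to-one. A small sketch of that window enumeration, with toy sizes:

```python
def make_chunks(seq_length: int, width: int, stride: int, start: int = 0):
    # Mirrors the block enumeration used above for local attention.
    return [
        (chunk_start, min(seq_length, chunk_start + width))
        for chunk_start in range(start, seq_length, stride)
    ]

from_chunks = make_chunks(seq_length=10, width=4, stride=4)
to_chunks = make_chunks(seq_length=10, width=4, stride=4)
print(from_chunks)                         # [(0, 4), (4, 8), (8, 10)]
assert len(from_chunks) == len(to_chunks)  # required so the windows can be zipped pairwise
```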
659
+ class CanineIntermediate(nn.Module):
660
+ def __init__(self, config):
661
+ super().__init__()
662
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
663
+ if isinstance(config.hidden_act, str):
664
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
665
+ else:
666
+ self.intermediate_act_fn = config.hidden_act
667
+
668
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
669
+ hidden_states = self.dense(hidden_states)
670
+ hidden_states = self.intermediate_act_fn(hidden_states)
671
+ return hidden_states
672
+
673
+
674
+ class CanineOutput(nn.Module):
675
+ def __init__(self, config):
676
+ super().__init__()
677
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
678
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
679
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
680
+
681
+ def forward(self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> torch.FloatTensor:
682
+ hidden_states = self.dense(hidden_states)
683
+ hidden_states = self.dropout(hidden_states)
684
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
685
+ return hidden_states
686
+
687
+
688
+ class CanineLayer(nn.Module):
689
+ def __init__(
690
+ self,
691
+ config,
692
+ local,
693
+ always_attend_to_first_position,
694
+ first_position_attends_to_all,
695
+ attend_from_chunk_width,
696
+ attend_from_chunk_stride,
697
+ attend_to_chunk_width,
698
+ attend_to_chunk_stride,
699
+ ):
700
+ super().__init__()
701
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
702
+ self.seq_len_dim = 1
703
+ self.attention = CanineAttention(
704
+ config,
705
+ local,
706
+ always_attend_to_first_position,
707
+ first_position_attends_to_all,
708
+ attend_from_chunk_width,
709
+ attend_from_chunk_stride,
710
+ attend_to_chunk_width,
711
+ attend_to_chunk_stride,
712
+ )
713
+ self.intermediate = CanineIntermediate(config)
714
+ self.output = CanineOutput(config)
715
+
716
+ def forward(
717
+ self,
718
+ hidden_states: Tuple[torch.FloatTensor],
719
+ attention_mask: Optional[torch.FloatTensor] = None,
720
+ head_mask: Optional[torch.FloatTensor] = None,
721
+ output_attentions: Optional[bool] = False,
722
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
723
+ self_attention_outputs = self.attention(
724
+ hidden_states,
725
+ attention_mask,
726
+ head_mask,
727
+ output_attentions=output_attentions,
728
+ )
729
+ attention_output = self_attention_outputs[0]
730
+
731
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
732
+
733
+ layer_output = apply_chunking_to_forward(
734
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
735
+ )
736
+ outputs = (layer_output,) + outputs
737
+
738
+ return outputs
739
+
740
+ def feed_forward_chunk(self, attention_output):
741
+ intermediate_output = self.intermediate(attention_output)
742
+ layer_output = self.output(intermediate_output, attention_output)
743
+ return layer_output
744
+
745
+
746
+ class CanineEncoder(nn.Module):
747
+ def __init__(
748
+ self,
749
+ config,
750
+ local=False,
751
+ always_attend_to_first_position=False,
752
+ first_position_attends_to_all=False,
753
+ attend_from_chunk_width=128,
754
+ attend_from_chunk_stride=128,
755
+ attend_to_chunk_width=128,
756
+ attend_to_chunk_stride=128,
757
+ ):
758
+ super().__init__()
759
+ self.config = config
760
+ self.layer = nn.ModuleList(
761
+ [
762
+ CanineLayer(
763
+ config,
764
+ local,
765
+ always_attend_to_first_position,
766
+ first_position_attends_to_all,
767
+ attend_from_chunk_width,
768
+ attend_from_chunk_stride,
769
+ attend_to_chunk_width,
770
+ attend_to_chunk_stride,
771
+ )
772
+ for _ in range(config.num_hidden_layers)
773
+ ]
774
+ )
775
+ self.gradient_checkpointing = False
776
+
777
+ def forward(
778
+ self,
779
+ hidden_states: Tuple[torch.FloatTensor],
780
+ attention_mask: Optional[torch.FloatTensor] = None,
781
+ head_mask: Optional[torch.FloatTensor] = None,
782
+ output_attentions: Optional[bool] = False,
783
+ output_hidden_states: Optional[bool] = False,
784
+ return_dict: Optional[bool] = True,
785
+ ) -> Union[Tuple, BaseModelOutput]:
786
+ all_hidden_states = () if output_hidden_states else None
787
+ all_self_attentions = () if output_attentions else None
788
+
789
+ for i, layer_module in enumerate(self.layer):
790
+ if output_hidden_states:
791
+ all_hidden_states = all_hidden_states + (hidden_states,)
792
+
793
+ layer_head_mask = head_mask[i] if head_mask is not None else None
794
+
795
+ if self.gradient_checkpointing and self.training:
796
+ layer_outputs = self._gradient_checkpointing_func(
797
+ layer_module.__call__,
798
+ hidden_states,
799
+ attention_mask,
800
+ layer_head_mask,
801
+ output_attentions,
802
+ )
803
+ else:
804
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
805
+
806
+ hidden_states = layer_outputs[0]
807
+ if output_attentions:
808
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
809
+
810
+ if output_hidden_states:
811
+ all_hidden_states = all_hidden_states + (hidden_states,)
812
+
813
+ if not return_dict:
814
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
815
+ return BaseModelOutput(
816
+ last_hidden_state=hidden_states,
817
+ hidden_states=all_hidden_states,
818
+ attentions=all_self_attentions,
819
+ )
820
+
821
+
822
+ class CaninePooler(nn.Module):
823
+ def __init__(self, config):
824
+ super().__init__()
825
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
826
+ self.activation = nn.Tanh()
827
+
828
+ def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
829
+ # We "pool" the model by simply taking the hidden state corresponding
830
+ # to the first token.
831
+ first_token_tensor = hidden_states[:, 0]
832
+ pooled_output = self.dense(first_token_tensor)
833
+ pooled_output = self.activation(pooled_output)
834
+ return pooled_output
835
+
836
+
837
+ class CaninePredictionHeadTransform(nn.Module):
838
+ def __init__(self, config):
839
+ super().__init__()
840
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
841
+ if isinstance(config.hidden_act, str):
842
+ self.transform_act_fn = ACT2FN[config.hidden_act]
843
+ else:
844
+ self.transform_act_fn = config.hidden_act
845
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
846
+
847
+ def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
848
+ hidden_states = self.dense(hidden_states)
849
+ hidden_states = self.transform_act_fn(hidden_states)
850
+ hidden_states = self.LayerNorm(hidden_states)
851
+ return hidden_states
852
+
853
+
854
+ class CanineLMPredictionHead(nn.Module):
855
+ def __init__(self, config):
856
+ super().__init__()
857
+ self.transform = CaninePredictionHeadTransform(config)
858
+
859
+ # The output weights are the same as the input embeddings, but there is
860
+ # an output-only bias for each token.
861
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
862
+
863
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
864
+
865
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
866
+ self.decoder.bias = self.bias
867
+
868
+ def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
869
+ hidden_states = self.transform(hidden_states)
870
+ hidden_states = self.decoder(hidden_states)
871
+ return hidden_states
872
+
873
+
874
+ class CanineOnlyMLMHead(nn.Module):
875
+ def __init__(self, config):
876
+ super().__init__()
877
+ self.predictions = CanineLMPredictionHead(config)
878
+
879
+ def forward(
880
+ self,
881
+ sequence_output: Tuple[torch.Tensor],
882
+ ) -> Tuple[torch.Tensor]:
883
+ prediction_scores = self.predictions(sequence_output)
884
+ return prediction_scores
885
+
886
+
887
+ class CaninePreTrainedModel(PreTrainedModel):
888
+ """
889
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
890
+ models.
891
+ """
892
+
893
+ config_class = CanineConfig
894
+ load_tf_weights = load_tf_weights_in_canine
895
+ base_model_prefix = "canine"
896
+ supports_gradient_checkpointing = True
897
+
898
+ def _init_weights(self, module):
899
+ """Initialize the weights"""
900
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
901
+ # Slightly different from the TF version which uses truncated_normal for initialization
902
+ # cf https://github.com/pytorch/pytorch/pull/5617
903
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
904
+ if module.bias is not None:
905
+ module.bias.data.zero_()
906
+ elif isinstance(module, nn.Embedding):
907
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
908
+ if module.padding_idx is not None:
909
+ module.weight.data[module.padding_idx].zero_()
910
+ elif isinstance(module, nn.LayerNorm):
911
+ module.bias.data.zero_()
912
+ module.weight.data.fill_(1.0)
913
+
914
+
915
+ CANINE_START_DOCSTRING = r"""
916
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
917
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
918
+ behavior.
919
+
920
+ Parameters:
921
+ config ([`CanineConfig`]): Model configuration class with all the parameters of the model.
922
+ Initializing with a config file does not load the weights associated with the model, only the
923
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
924
+ """
925
+
926
+ CANINE_INPUTS_DOCSTRING = r"""
927
+ Args:
928
+ input_ids (`torch.LongTensor` of shape `({0})`):
929
+ Indices of input sequence tokens in the vocabulary.
930
+
931
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
932
+ [`PreTrainedTokenizer.__call__`] for details.
933
+
934
+ [What are input IDs?](../glossary#input-ids)
935
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
936
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
937
+
938
+ - 1 for tokens that are **not masked**,
939
+ - 0 for tokens that are **masked**.
940
+
941
+ [What are attention masks?](../glossary#attention-mask)
942
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
943
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
944
+ 1]`:
945
+
946
+ - 0 corresponds to a *sentence A* token,
947
+ - 1 corresponds to a *sentence B* token.
948
+
949
+ [What are token type IDs?](../glossary#token-type-ids)
950
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
951
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
952
+ config.max_position_embeddings - 1]`.
953
+
954
+ [What are position IDs?](../glossary#position-ids)
955
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
956
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
957
+
958
+ - 1 indicates the head is **not masked**,
959
+ - 0 indicates the head is **masked**.
960
+
961
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
962
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
963
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
964
+ model's internal embedding lookup matrix.
965
+ output_attentions (`bool`, *optional*):
966
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
967
+ tensors for more detail.
968
+ output_hidden_states (`bool`, *optional*):
969
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
970
+ more detail.
971
+ return_dict (`bool`, *optional*):
972
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
973
+ """
974
+
975
+
976
+ @add_start_docstrings(
977
+ "The bare CANINE Model transformer outputting raw hidden-states without any specific head on top.",
978
+ CANINE_START_DOCSTRING,
979
+ )
980
+ class CanineModel(CaninePreTrainedModel):
981
+ def __init__(self, config, add_pooling_layer=True):
982
+ super().__init__(config)
983
+ self.config = config
984
+ shallow_config = copy.deepcopy(config)
985
+ shallow_config.num_hidden_layers = 1
986
+
987
+ self.char_embeddings = CanineEmbeddings(config)
988
+ # shallow/low-dim transformer encoder to get an initial character encoding
989
+ self.initial_char_encoder = CanineEncoder(
990
+ shallow_config,
991
+ local=True,
992
+ always_attend_to_first_position=False,
993
+ first_position_attends_to_all=False,
994
+ attend_from_chunk_width=config.local_transformer_stride,
995
+ attend_from_chunk_stride=config.local_transformer_stride,
996
+ attend_to_chunk_width=config.local_transformer_stride,
997
+ attend_to_chunk_stride=config.local_transformer_stride,
998
+ )
999
+ self.chars_to_molecules = CharactersToMolecules(config)
1000
+ # deep transformer encoder
1001
+ self.encoder = CanineEncoder(config)
1002
+ self.projection = ConvProjection(config)
1003
+ # shallow/low-dim transformer encoder to get a final character encoding
1004
+ self.final_char_encoder = CanineEncoder(shallow_config)
1005
+
1006
+ self.pooler = CaninePooler(config) if add_pooling_layer else None
1007
+
1008
+ # Initialize weights and apply final processing
1009
+ self.post_init()
1010
+
1011
+ def _prune_heads(self, heads_to_prune):
1012
+ """
1013
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1014
+ class PreTrainedModel
1015
+ """
1016
+ for layer, heads in heads_to_prune.items():
1017
+ self.encoder.layer[layer].attention.prune_heads(heads)
1018
+
1019
+ def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask):
1020
+ """
1021
+ Create 3D attention mask from a 2D tensor mask.
1022
+
1023
+ Args:
1024
+ from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
1025
+ to_mask: int32 Tensor of shape [batch_size, to_seq_length].
1026
+
1027
+ Returns:
1028
+ float Tensor of shape [batch_size, from_seq_length, to_seq_length].
1029
+ """
1030
+ batch_size, from_seq_length = from_tensor.shape[0], from_tensor.shape[1]
1031
+
1032
+ to_seq_length = to_mask.shape[1]
1033
+
1034
+ to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float()
1035
+
1036
+ # We don't assume that `from_tensor` is a mask (although it could be). We
1037
+ # don't actually care if we attend *from* padding tokens (only *to* padding)
1038
+ # tokens so we create a tensor of all ones.
1039
+ broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device)
1040
+
1041
+ # Here we broadcast along two dimensions to create the mask.
1042
+ mask = broadcast_ones * to_mask
1043
+
1044
+ return mask
1045
+
1046
+ def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int):
1047
+ """Downsample 2D character attention mask to 2D molecule attention mask using MaxPool1d layer."""
1048
+
1049
+ # first, make char_attention_mask 3D by adding a channel dim
1050
+ batch_size, char_seq_len = char_attention_mask.shape
1051
+ poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len))
1052
+
1053
+ # next, apply MaxPool1d to get pooled_molecule_mask of shape (batch_size, 1, mol_seq_len)
1054
+ pooled_molecule_mask = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)(
1055
+ poolable_char_mask.float()
1056
+ )
1057
+
1058
+ # finally, squeeze to get tensor of shape (batch_size, mol_seq_len)
1059
+ molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1)
1060
+
1061
+ return molecule_attention_mask
1062
+
1063
+ def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: torch.Tensor) -> torch.Tensor:
1064
+ """Repeats molecules to make them the same length as the char sequence."""
1065
+
1066
+ rate = self.config.downsampling_rate
1067
+
1068
+ molecules_without_extra_cls = molecules[:, 1:, :]
1069
+ # `repeated`: [batch_size, almost_char_seq_len, molecule_hidden_size]
1070
+ repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2)
1071
+
1072
+ # So far, we've repeated the elements sufficient for any `char_seq_length`
1073
+ # that's a multiple of `downsampling_rate`. Now we account for the last
1074
+ # n elements (n < `downsampling_rate`), i.e. the remainder of floor
1075
+ # division. We do this by repeating the last molecule a few extra times.
1076
+ last_molecule = molecules[:, -1:, :]
1077
+ remainder_length = torch.fmod(torch.tensor(char_seq_length), torch.tensor(rate)).item()
1078
+ remainder_repeated = torch.repeat_interleave(
1079
+ last_molecule,
1080
+ # +1 molecule to compensate for truncation.
1081
+ repeats=remainder_length + rate,
1082
+ dim=-2,
1083
+ )
1084
+
1085
+ # `repeated`: [batch_size, char_seq_len, molecule_hidden_size]
1086
+ return torch.cat([repeated, remainder_repeated], dim=-2)
1087
+
1088
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1089
+ @add_code_sample_docstrings(
1090
+ checkpoint=_CHECKPOINT_FOR_DOC,
1091
+ output_type=CanineModelOutputWithPooling,
1092
+ config_class=_CONFIG_FOR_DOC,
1093
+ )
1094
+ def forward(
1095
+ self,
1096
+ input_ids: Optional[torch.LongTensor] = None,
1097
+ attention_mask: Optional[torch.FloatTensor] = None,
1098
+ token_type_ids: Optional[torch.LongTensor] = None,
1099
+ position_ids: Optional[torch.LongTensor] = None,
1100
+ head_mask: Optional[torch.FloatTensor] = None,
1101
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1102
+ output_attentions: Optional[bool] = None,
1103
+ output_hidden_states: Optional[bool] = None,
1104
+ return_dict: Optional[bool] = None,
1105
+ ) -> Union[Tuple, CanineModelOutputWithPooling]:
1106
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1107
+ output_hidden_states = (
1108
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1109
+ )
1110
+ all_hidden_states = () if output_hidden_states else None
1111
+ all_self_attentions = () if output_attentions else None
1112
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1113
+
1114
+ if input_ids is not None and inputs_embeds is not None:
1115
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1116
+ elif input_ids is not None:
1117
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1118
+ input_shape = input_ids.size()
1119
+ elif inputs_embeds is not None:
1120
+ input_shape = inputs_embeds.size()[:-1]
1121
+ else:
1122
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1123
+
1124
+ batch_size, seq_length = input_shape
1125
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1126
+
1127
+ if attention_mask is None:
1128
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
1129
+ if token_type_ids is None:
1130
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1131
+
1132
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1133
+ # ourselves in which case we just need to make it broadcastable to all heads.
1134
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
1135
+ molecule_attention_mask = self._downsample_attention_mask(
1136
+ attention_mask, downsampling_rate=self.config.downsampling_rate
1137
+ )
1138
+ extended_molecule_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1139
+ molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1])
1140
+ )
1141
+
1142
+ # Prepare head mask if needed
1143
+ # 1.0 in head_mask indicate we keep the head
1144
+ # attention_probs has shape bsz x n_heads x N x N
1145
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1146
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1147
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1148
+
1149
+ # `input_char_embeddings`: shape (batch_size, char_seq, char_dim)
1150
+ input_char_embeddings = self.char_embeddings(
1151
+ input_ids=input_ids,
1152
+ position_ids=position_ids,
1153
+ token_type_ids=token_type_ids,
1154
+ inputs_embeds=inputs_embeds,
1155
+ )
1156
+
1157
+ # Contextualize character embeddings using shallow Transformer.
1158
+ # We use a 3D attention mask for the local attention.
1159
+ # `input_char_encoding`: shape (batch_size, char_seq_len, char_dim)
1160
+ char_attention_mask = self._create_3d_attention_mask_from_input_mask(
1161
+ input_ids if input_ids is not None else inputs_embeds, attention_mask
1162
+ )
1163
+ init_chars_encoder_outputs = self.initial_char_encoder(
1164
+ input_char_embeddings,
1165
+ attention_mask=char_attention_mask,
1166
+ output_attentions=output_attentions,
1167
+ output_hidden_states=output_hidden_states,
1168
+ )
1169
+ input_char_encoding = init_chars_encoder_outputs.last_hidden_state
1170
+
1171
+ # Downsample chars to molecules.
1172
+ # The following lines have dimensions: [batch, molecule_seq, molecule_dim].
1173
+ # In this transformation, we change the dimensionality from `char_dim` to
1174
+ # `molecule_dim`, but do *NOT* add a resnet connection. Instead, we rely on
1175
+ # the resnet connections (a) from the final char transformer stack back into
1176
+ # the original char transformer stack and (b) the resnet connections from
1177
+ # the final char transformer stack back into the deep BERT stack of
1178
+ # molecules.
1179
+ #
1180
+ # Empirically, it is critical to use a powerful enough transformation here:
1181
+ # mean pooling causes training to diverge with huge gradient norms in this
1182
+ # region of the model; using a convolution here resolves this issue. From
1183
+ # this, it seems that molecules and characters require a very different
1184
+ # feature space; intuitively, this makes sense.
1185
+ init_molecule_encoding = self.chars_to_molecules(input_char_encoding)
1186
+
1187
+ # Deep BERT encoder
1188
+ # `molecule_sequence_output`: shape (batch_size, mol_seq_len, mol_dim)
1189
+ encoder_outputs = self.encoder(
1190
+ init_molecule_encoding,
1191
+ attention_mask=extended_molecule_attention_mask,
1192
+ head_mask=head_mask,
1193
+ output_attentions=output_attentions,
1194
+ output_hidden_states=output_hidden_states,
1195
+ return_dict=return_dict,
1196
+ )
1197
+ molecule_sequence_output = encoder_outputs[0]
1198
+ pooled_output = self.pooler(molecule_sequence_output) if self.pooler is not None else None
1199
+
1200
+ # Upsample molecules back to characters.
1201
+ # `repeated_molecules`: shape (batch_size, char_seq_len, mol_hidden_size)
1202
+ repeated_molecules = self._repeat_molecules(molecule_sequence_output, char_seq_length=input_shape[-1])
1203
+
1204
+ # Concatenate representations (contextualized char embeddings and repeated molecules):
1205
+ # `concat`: shape [batch_size, char_seq_len, molecule_hidden_size+char_hidden_final]
1206
+ concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1)
1207
+
1208
+ # Project representation dimension back to hidden_size
1209
+ # `sequence_output`: shape (batch_size, char_seq_len, hidden_size)
1210
+ sequence_output = self.projection(concat)
1211
+
1212
+ # Apply final shallow Transformer
1213
+ # `sequence_output`: shape (batch_size, char_seq_len, hidden_size)
1214
+ final_chars_encoder_outputs = self.final_char_encoder(
1215
+ sequence_output,
1216
+ attention_mask=extended_attention_mask,
1217
+ output_attentions=output_attentions,
1218
+ output_hidden_states=output_hidden_states,
1219
+ )
1220
+ sequence_output = final_chars_encoder_outputs.last_hidden_state
1221
+
1222
+ if output_hidden_states:
1223
+ deep_encoder_hidden_states = encoder_outputs.hidden_states if return_dict else encoder_outputs[1]
1224
+ all_hidden_states = (
1225
+ all_hidden_states
1226
+ + init_chars_encoder_outputs.hidden_states
1227
+ + deep_encoder_hidden_states
1228
+ + final_chars_encoder_outputs.hidden_states
1229
+ )
1230
+
1231
+ if output_attentions:
1232
+ deep_encoder_self_attentions = encoder_outputs.attentions if return_dict else encoder_outputs[-1]
1233
+ all_self_attentions = (
1234
+ all_self_attentions
1235
+ + init_chars_encoder_outputs.attentions
1236
+ + deep_encoder_self_attentions
1237
+ + final_chars_encoder_outputs.attentions
1238
+ )
1239
+
1240
+ if not return_dict:
1241
+ output = (sequence_output, pooled_output)
1242
+ output += tuple(v for v in [all_hidden_states, all_self_attentions] if v is not None)
1243
+ return output
1244
+
1245
+ return CanineModelOutputWithPooling(
1246
+ last_hidden_state=sequence_output,
1247
+ pooler_output=pooled_output,
1248
+ hidden_states=all_hidden_states,
1249
+ attentions=all_self_attentions,
1250
+ )
1251
+
1252
+
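End to end, `CanineModel` pools the character attention mask down to molecule resolution with max pooling and later stretches molecules back to character length by repeating each one `downsampling_rate` times, with the tail covered by repeating the last molecule (as in `_repeat_molecules`). A shape-level sketch of that round trip with assumed sizes:

```python
import torch

rate, hidden = 4, 8                                # illustrative sizes
char_mask = torch.tensor([[1.0] * 9 + [0.0] * 3])  # (batch=1, char_seq=12), 3 padded characters

# Downsample: a molecule counts as "real" if any of its characters is real.
mol_mask = torch.nn.MaxPool1d(rate, rate)(char_mask[:, None, :]).squeeze(1)
print(mol_mask)                                    # tensor([[1., 1., 1.]]): 12 chars -> 3 molecules

# Upsample: drop the molecule-level [CLS], repeat each remaining molecule `rate` times,
# then extend the tail by repeating the last molecule.
molecules = torch.randn(1, 3, hidden)              # (batch, mol_seq, hidden)
repeated = torch.repeat_interleave(molecules[:, 1:, :], repeats=rate, dim=-2)
tail = torch.repeat_interleave(molecules[:, -1:, :], repeats=12 % rate + rate, dim=-2)
print(torch.cat([repeated, tail], dim=-2).shape)   # torch.Size([1, 12, 8]): back to char length
```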
1253
+ @add_start_docstrings(
1254
+ """
1255
+ CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1256
+ output) e.g. for GLUE tasks.
1257
+ """,
1258
+ CANINE_START_DOCSTRING,
1259
+ )
1260
+ class CanineForSequenceClassification(CaninePreTrainedModel):
1261
+ def __init__(self, config):
1262
+ super().__init__(config)
1263
+ self.num_labels = config.num_labels
1264
+
1265
+ self.canine = CanineModel(config)
1266
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1267
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1268
+
1269
+ # Initialize weights and apply final processing
1270
+ self.post_init()
1271
+
1272
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1273
+ @add_code_sample_docstrings(
1274
+ checkpoint=_CHECKPOINT_FOR_DOC,
1275
+ output_type=SequenceClassifierOutput,
1276
+ config_class=_CONFIG_FOR_DOC,
1277
+ )
1278
+ def forward(
1279
+ self,
1280
+ input_ids: Optional[torch.LongTensor] = None,
1281
+ attention_mask: Optional[torch.FloatTensor] = None,
1282
+ token_type_ids: Optional[torch.LongTensor] = None,
1283
+ position_ids: Optional[torch.LongTensor] = None,
1284
+ head_mask: Optional[torch.FloatTensor] = None,
1285
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1286
+ labels: Optional[torch.LongTensor] = None,
1287
+ output_attentions: Optional[bool] = None,
1288
+ output_hidden_states: Optional[bool] = None,
1289
+ return_dict: Optional[bool] = None,
1290
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1291
+ r"""
1292
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1293
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1294
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1295
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1296
+ """
1297
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1298
+
1299
+ outputs = self.canine(
1300
+ input_ids,
1301
+ attention_mask=attention_mask,
1302
+ token_type_ids=token_type_ids,
1303
+ position_ids=position_ids,
1304
+ head_mask=head_mask,
1305
+ inputs_embeds=inputs_embeds,
1306
+ output_attentions=output_attentions,
1307
+ output_hidden_states=output_hidden_states,
1308
+ return_dict=return_dict,
1309
+ )
1310
+
1311
+ pooled_output = outputs[1]
1312
+
1313
+ pooled_output = self.dropout(pooled_output)
1314
+ logits = self.classifier(pooled_output)
1315
+
1316
+ loss = None
1317
+ if labels is not None:
1318
+ if self.config.problem_type is None:
1319
+ if self.num_labels == 1:
1320
+ self.config.problem_type = "regression"
1321
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1322
+ self.config.problem_type = "single_label_classification"
1323
+ else:
1324
+ self.config.problem_type = "multi_label_classification"
1325
+
1326
+ if self.config.problem_type == "regression":
1327
+ loss_fct = MSELoss()
1328
+ if self.num_labels == 1:
1329
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1330
+ else:
1331
+ loss = loss_fct(logits, labels)
1332
+ elif self.config.problem_type == "single_label_classification":
1333
+ loss_fct = CrossEntropyLoss()
1334
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1335
+ elif self.config.problem_type == "multi_label_classification":
1336
+ loss_fct = BCEWithLogitsLoss()
1337
+ loss = loss_fct(logits, labels)
1338
+ if not return_dict:
1339
+ output = (logits,) + outputs[2:]
1340
+ return ((loss,) + output) if loss is not None else output
1341
+
1342
+ return SequenceClassifierOutput(
1343
+ loss=loss,
1344
+ logits=logits,
1345
+ hidden_states=outputs.hidden_states,
1346
+ attentions=outputs.attentions,
1347
+ )
1348
+
1349
+
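The classification head above chooses its loss from `config.problem_type`, inferring it from the labels when unset: a single label means regression (MSE), integer labels with several classes mean single-label classification (cross-entropy), and anything else is treated as multi-label (BCE with logits). A condensed sketch of that decision outside the model:

```python
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    # Same decision order as the forward pass above.
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"

print(infer_problem_type(1, torch.tensor([0.7])))              # regression
print(infer_problem_type(3, torch.tensor([2])))                # single_label_classification
print(infer_problem_type(3, torch.tensor([[1.0, 0.0, 1.0]])))  # multi_label_classification
```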
1350
+ @add_start_docstrings(
1351
+ """
1352
+ CANINE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1353
+ softmax) e.g. for RocStories/SWAG tasks.
1354
+ """,
1355
+ CANINE_START_DOCSTRING,
1356
+ )
1357
+ class CanineForMultipleChoice(CaninePreTrainedModel):
1358
+ def __init__(self, config):
1359
+ super().__init__(config)
1360
+
1361
+ self.canine = CanineModel(config)
1362
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1363
+ self.classifier = nn.Linear(config.hidden_size, 1)
1364
+
1365
+ # Initialize weights and apply final processing
1366
+ self.post_init()
1367
+
1368
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1369
+ @add_code_sample_docstrings(
1370
+ checkpoint=_CHECKPOINT_FOR_DOC,
1371
+ output_type=MultipleChoiceModelOutput,
1372
+ config_class=_CONFIG_FOR_DOC,
1373
+ )
1374
+ def forward(
1375
+ self,
1376
+ input_ids: Optional[torch.LongTensor] = None,
1377
+ attention_mask: Optional[torch.FloatTensor] = None,
1378
+ token_type_ids: Optional[torch.LongTensor] = None,
1379
+ position_ids: Optional[torch.LongTensor] = None,
1380
+ head_mask: Optional[torch.FloatTensor] = None,
1381
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1382
+ labels: Optional[torch.LongTensor] = None,
1383
+ output_attentions: Optional[bool] = None,
1384
+ output_hidden_states: Optional[bool] = None,
1385
+ return_dict: Optional[bool] = None,
1386
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1387
+ r"""
1388
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1389
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1390
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1391
+ `input_ids` above)
1392
+ """
1393
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1394
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1395
+
1396
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1397
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1398
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1399
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1400
+ inputs_embeds = (
1401
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1402
+ if inputs_embeds is not None
1403
+ else None
1404
+ )
1405
+
1406
+ outputs = self.canine(
1407
+ input_ids,
1408
+ attention_mask=attention_mask,
1409
+ token_type_ids=token_type_ids,
1410
+ position_ids=position_ids,
1411
+ head_mask=head_mask,
1412
+ inputs_embeds=inputs_embeds,
1413
+ output_attentions=output_attentions,
1414
+ output_hidden_states=output_hidden_states,
1415
+ return_dict=return_dict,
1416
+ )
1417
+
1418
+ pooled_output = outputs[1]
1419
+
1420
+ pooled_output = self.dropout(pooled_output)
1421
+ logits = self.classifier(pooled_output)
1422
+ reshaped_logits = logits.view(-1, num_choices)
1423
+
1424
+ loss = None
1425
+ if labels is not None:
1426
+ loss_fct = CrossEntropyLoss()
1427
+ loss = loss_fct(reshaped_logits, labels)
1428
+
1429
+ if not return_dict:
1430
+ output = (reshaped_logits,) + outputs[2:]
1431
+ return ((loss,) + output) if loss is not None else output
1432
+
1433
+ return MultipleChoiceModelOutput(
1434
+ loss=loss,
1435
+ logits=reshaped_logits,
1436
+ hidden_states=outputs.hidden_states,
1437
+ attentions=outputs.attentions,
1438
+ )
1439
+
1440
+
1441
+ @add_start_docstrings(
1442
+ """
1443
+ CANINE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1444
+ Named-Entity-Recognition (NER) tasks.
1445
+ """,
1446
+ CANINE_START_DOCSTRING,
1447
+ )
1448
+ class CanineForTokenClassification(CaninePreTrainedModel):
1449
+ def __init__(self, config):
1450
+ super().__init__(config)
1451
+ self.num_labels = config.num_labels
1452
+
1453
+ self.canine = CanineModel(config)
1454
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1455
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1456
+
1457
+ # Initialize weights and apply final processing
1458
+ self.post_init()
1459
+
1460
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1461
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1462
+ def forward(
1463
+ self,
1464
+ input_ids: Optional[torch.LongTensor] = None,
1465
+ attention_mask: Optional[torch.FloatTensor] = None,
1466
+ token_type_ids: Optional[torch.LongTensor] = None,
1467
+ position_ids: Optional[torch.LongTensor] = None,
1468
+ head_mask: Optional[torch.FloatTensor] = None,
1469
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1470
+ labels: Optional[torch.LongTensor] = None,
1471
+ output_attentions: Optional[bool] = None,
1472
+ output_hidden_states: Optional[bool] = None,
1473
+ return_dict: Optional[bool] = None,
1474
+ ) -> Union[Tuple, TokenClassifierOutput]:
1475
+ r"""
1476
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1477
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1478
+
1479
+ Returns:
1480
+
1481
+ Example:
1482
+
1483
+ ```python
1484
+ >>> from transformers import AutoTokenizer, CanineForTokenClassification
1485
+ >>> import torch
1486
+
1487
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/canine-s")
1488
+ >>> model = CanineForTokenClassification.from_pretrained("google/canine-s")
1489
+
1490
+ >>> inputs = tokenizer(
1491
+ ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
1492
+ ... )
1493
+
1494
+ >>> with torch.no_grad():
1495
+ ... logits = model(**inputs).logits
1496
+
1497
+ >>> predicted_token_class_ids = logits.argmax(-1)
1498
+
1499
+ >>> # Note that tokens are classified rather than input words which means that
1500
+ >>> # there might be more predicted token classes than words.
1501
+ >>> # Multiple token classes might account for the same word
1502
+ >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
1503
+ >>> predicted_tokens_classes # doctest: +SKIP
1504
+ ```
1505
+
1506
+ ```python
1507
+ >>> labels = predicted_token_class_ids
1508
+ >>> loss = model(**inputs, labels=labels).loss
1509
+ >>> round(loss.item(), 2) # doctest: +SKIP
1510
+ ```"""
1511
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1512
+
1513
+ outputs = self.canine(
1514
+ input_ids,
1515
+ attention_mask=attention_mask,
1516
+ token_type_ids=token_type_ids,
1517
+ position_ids=position_ids,
1518
+ head_mask=head_mask,
1519
+ inputs_embeds=inputs_embeds,
1520
+ output_attentions=output_attentions,
1521
+ output_hidden_states=output_hidden_states,
1522
+ return_dict=return_dict,
1523
+ )
1524
+
1525
+ sequence_output = outputs[0]
1526
+
1527
+ sequence_output = self.dropout(sequence_output)
1528
+ logits = self.classifier(sequence_output)
1529
+
1530
+ loss = None
1531
+ if labels is not None:
1532
+ loss_fct = CrossEntropyLoss()
1533
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1534
+
1535
+ if not return_dict:
1536
+ output = (logits,) + outputs[2:]
1537
+ return ((loss,) + output) if loss is not None else output
1538
+
1539
+ return TokenClassifierOutput(
1540
+ loss=loss,
1541
+ logits=logits,
1542
+ hidden_states=outputs.hidden_states,
1543
+ attentions=outputs.attentions,
1544
+ )
1545
+
1546
+
1547
+ @add_start_docstrings(
1548
+ """
1549
+ CANINE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1550
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1551
+ """,
1552
+ CANINE_START_DOCSTRING,
1553
+ )
1554
+ class CanineForQuestionAnswering(CaninePreTrainedModel):
1555
+ def __init__(self, config):
1556
+ super().__init__(config)
1557
+ self.num_labels = config.num_labels
1558
+
1559
+ self.canine = CanineModel(config)
1560
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1561
+
1562
+ # Initialize weights and apply final processing
1563
+ self.post_init()
1564
+
1565
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1566
+ @add_code_sample_docstrings(
1567
+ checkpoint="Splend1dchan/canine-c-squad",
1568
+ output_type=QuestionAnsweringModelOutput,
1569
+ config_class=_CONFIG_FOR_DOC,
1570
+ expected_output="'nice puppet'",
1571
+ expected_loss=8.81,
1572
+ )
1573
+ def forward(
1574
+ self,
1575
+ input_ids: Optional[torch.LongTensor] = None,
1576
+ attention_mask: Optional[torch.FloatTensor] = None,
1577
+ token_type_ids: Optional[torch.LongTensor] = None,
1578
+ position_ids: Optional[torch.LongTensor] = None,
1579
+ head_mask: Optional[torch.FloatTensor] = None,
1580
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1581
+ start_positions: Optional[torch.LongTensor] = None,
1582
+ end_positions: Optional[torch.LongTensor] = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ return_dict: Optional[bool] = None,
1586
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1587
+ r"""
1588
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1589
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1590
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1591
+ are not taken into account for computing the loss.
1592
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1593
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1594
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1595
+ are not taken into account for computing the loss.
1596
+ """
1597
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1598
+
1599
+ outputs = self.canine(
1600
+ input_ids,
1601
+ attention_mask=attention_mask,
1602
+ token_type_ids=token_type_ids,
1603
+ position_ids=position_ids,
1604
+ head_mask=head_mask,
1605
+ inputs_embeds=inputs_embeds,
1606
+ output_attentions=output_attentions,
1607
+ output_hidden_states=output_hidden_states,
1608
+ return_dict=return_dict,
1609
+ )
1610
+
1611
+ sequence_output = outputs[0]
1612
+
1613
+ logits = self.qa_outputs(sequence_output)
1614
+ start_logits, end_logits = logits.split(1, dim=-1)
1615
+ start_logits = start_logits.squeeze(-1)
1616
+ end_logits = end_logits.squeeze(-1)
1617
+
1618
+ total_loss = None
1619
+ if start_positions is not None and end_positions is not None:
1620
+ # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it
1621
+ if len(start_positions.size()) > 1:
1622
+ start_positions = start_positions.squeeze(-1)
1623
+ if len(end_positions.size()) > 1:
1624
+ end_positions = end_positions.squeeze(-1)
1625
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1626
+ ignored_index = start_logits.size(1)
1627
+ start_positions.clamp_(0, ignored_index)
1628
+ end_positions.clamp_(0, ignored_index)
1629
+
1630
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1631
+ start_loss = loss_fct(start_logits, start_positions)
1632
+ end_loss = loss_fct(end_logits, end_positions)
1633
+ total_loss = (start_loss + end_loss) / 2
1634
+
1635
+ if not return_dict:
1636
+ output = (start_logits, end_logits) + outputs[2:]
1637
+ return ((total_loss,) + output) if total_loss is not None else output
1638
+
1639
+ return QuestionAnsweringModelOutput(
1640
+ loss=total_loss,
1641
+ start_logits=start_logits,
1642
+ end_logits=end_logits,
1643
+ hidden_states=outputs.hidden_states,
1644
+ attentions=outputs.attentions,
1645
+ )
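For quick reference, a minimal inference sketch against the checkpoint named in the `add_code_sample_docstrings` decorator above. The question/context strings are made up, and the checkpoint is assumed to ship matching tokenizer files:

```python
import torch

from transformers import AutoTokenizer, CanineForQuestionAnswering

# Checkpoint name taken from the docstring decorator above (assumption: tokenizer files are included).
tokenizer = AutoTokenizer.from_pretrained("Splend1dchan/canine-c-squad")
model = CanineForQuestionAnswering.from_pretrained("Splend1dchan/canine-c-squad")

question, context = "Who wrote the play?", "The play was written by William Shakespeare."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Decode the character span between the most likely start and end positions.
start = outputs.start_logits.argmax(-1).item()
end = outputs.end_logits.argmax(-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```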
venv/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py ADDED
@@ -0,0 +1,241 @@
1
+ # coding=utf-8
2
+ # Copyright Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CANINE."""
16
+
17
+ from typing import Dict, List, Optional
18
+
19
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ # Unicode defines 1,114,112 total “codepoints”
27
+ UNICODE_VOCAB_SIZE = 1114112
28
+
29
+ # Below: Constants defining canonical codepoints for special, pseudo-characters.
30
+ # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
31
+ PAD = 0
32
+ CLS = 0xE000
33
+ SEP = 0xE001
34
+ BOS = 0xE002
35
+ MASK = 0xE003
36
+ RESERVED = 0xE004
37
+
38
+ # Maps special codepoints to human-readable names.
39
+ SPECIAL_CODEPOINTS: Dict[int, str] = {
40
+ # Special symbols are represented using codepoint values that are valid,
41
+ # but designated as "Private Use", meaning that they will never be assigned
42
+ # characters by the Unicode Consortium, and are thus safe for use here.
43
+ #
44
+ # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
45
+ # excluded and should fail with a hard error.
46
+ CLS: "[CLS]",
47
+ SEP: "[SEP]",
48
+ BOS: "[BOS]",
49
+ MASK: "[MASK]",
50
+ PAD: "[PAD]",
51
+ RESERVED: "[RESERVED]",
52
+ }
53
+
54
+ # Maps special codepoint human-readable names to their codepoint values.
55
+ SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
56
+
57
+
58
+ class CanineTokenizer(PreTrainedTokenizer):
59
+ r"""
60
+ Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then
61
+ converts each character into its Unicode code point.
62
+
63
+ [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`].
64
+
65
+ Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters.
66
+
67
+ Args:
68
+ model_max_length (`int`, *optional*, defaults to 2048):
69
+ The maximum sentence length the model accepts.
70
+ """
71
+
72
+ def __init__(
73
+ self,
74
+ bos_token=chr(CLS),
75
+ eos_token=chr(SEP),
76
+ sep_token=chr(SEP),
77
+ cls_token=chr(CLS),
78
+ pad_token=chr(PAD),
79
+ mask_token=chr(MASK),
80
+ add_prefix_space=False,
81
+ model_max_length=2048,
82
+ **kwargs,
83
+ ):
84
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
85
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
86
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
87
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
88
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
89
+
90
+ # The mask token behaves like a normal word, i.e. it includes the space before it
91
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
92
+
93
+ # Creates a mapping for looking up the IDs of special symbols.
94
+ self._special_codepoints: Dict[str, int] = {}
95
+ for codepoint, name in SPECIAL_CODEPOINTS.items():
96
+ self._special_codepoints[name] = codepoint
97
+
98
+ # Creates a mapping for looking up the string forms of special symbol IDs.
99
+ self._special_codepoint_strings: Dict[int, str] = {
100
+ codepoint: name for name, codepoint in self._special_codepoints.items()
101
+ }
102
+
103
+ self._unicode_vocab_size = UNICODE_VOCAB_SIZE
104
+ self._num_special_tokens = len(self._special_codepoints)
105
+
106
+ super().__init__(
107
+ bos_token=bos_token,
108
+ eos_token=eos_token,
109
+ sep_token=sep_token,
110
+ cls_token=cls_token,
111
+ pad_token=pad_token,
112
+ mask_token=mask_token,
113
+ add_prefix_space=add_prefix_space,
114
+ model_max_length=model_max_length,
115
+ **kwargs,
116
+ )
117
+
118
+ @property
119
+ def vocab_size(self) -> int:
120
+ return self._unicode_vocab_size
121
+
122
+ def get_vocab(self):
123
+ vocab = {chr(i): i for i in range(self.vocab_size)}
124
+ vocab.update(self.added_tokens_encoder)
125
+ return vocab
126
+
127
+ def _tokenize(self, text: str) -> List[str]:
128
+ """Tokenize a string (i.e. perform character splitting)."""
129
+ return list(text)
130
+
131
+ def _convert_token_to_id(self, token: str) -> int:
132
+ """Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value)."""
133
+ try:
134
+ return ord(token)
135
+ except TypeError:
136
+ raise ValueError(f"invalid token: '{token}'")
137
+
138
+ def _convert_id_to_token(self, index: int) -> str:
139
+ """
140
+ Converts a Unicode code point (integer) into a token (str). In case it's a special code point, convert to
141
+ human-readable format.
142
+ """
143
+ try:
144
+ if index in SPECIAL_CODEPOINTS:
145
+ return SPECIAL_CODEPOINTS[index]
146
+ return chr(index)
147
+ except TypeError:
148
+ raise ValueError(f"invalid id: {index}")
149
+
150
+ def convert_tokens_to_string(self, tokens):
151
+ return "".join(tokens)
152
+
153
+ def build_inputs_with_special_tokens(
154
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
155
+ ) -> List[int]:
156
+ """
157
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
158
+ adding special tokens. A CANINE sequence has the following format:
159
+
160
+ - single sequence: `[CLS] X [SEP]`
161
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
162
+
163
+ Args:
164
+ token_ids_0 (`List[int]`):
165
+ List of IDs to which the special tokens will be added.
166
+ token_ids_1 (`List[int]`, *optional*):
167
+ Optional second list of IDs for sequence pairs.
168
+
169
+ Returns:
170
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
171
+ """
172
+ sep = [self.sep_token_id]
173
+ cls = [self.cls_token_id]
174
+
175
+ result = cls + token_ids_0 + sep
176
+ if token_ids_1 is not None:
177
+ result += token_ids_1 + sep
178
+ return result
179
+
180
+ def get_special_tokens_mask(
181
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
182
+ ) -> List[int]:
183
+ """
184
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
185
+ special tokens using the tokenizer `prepare_for_model` method.
186
+
187
+ Args:
188
+ token_ids_0 (`List[int]`):
189
+ List of IDs.
190
+ token_ids_1 (`List[int]`, *optional*):
191
+ Optional second list of IDs for sequence pairs.
192
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
193
+ Whether or not the token list is already formatted with special tokens for the model.
194
+
195
+ Returns:
196
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
197
+ """
198
+ if already_has_special_tokens:
199
+ return super().get_special_tokens_mask(
200
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
201
+ )
202
+
203
+ result = [1] + ([0] * len(token_ids_0)) + [1]
204
+ if token_ids_1 is not None:
205
+ result += ([0] * len(token_ids_1)) + [1]
206
+ return result
207
+
208
+ def create_token_type_ids_from_sequences(
209
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
210
+ ) -> List[int]:
211
+ """
212
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A CANINE
213
+ sequence pair mask has the following format:
214
+
215
+ ```
216
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
217
+ | first sequence | second sequence |
218
+ ```
219
+
220
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
230
+ """
231
+ sep = [self.sep_token_id]
232
+ cls = [self.cls_token_id]
233
+
234
+ result = len(cls + token_ids_0 + sep) * [0]
235
+ if token_ids_1 is not None:
236
+ result += len(token_ids_1 + sep) * [1]
237
+ return result
238
+
239
+ # CanineTokenizer has no vocab file
240
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
241
+ return ()
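A quick usage sketch of the character-level tokenizer defined above: every character becomes its Unicode code point, with the private-use [CLS]/[SEP] codepoints added around it, matching `build_inputs_with_special_tokens`:

```python
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")

encoding = tokenizer("hello")
# [CLS] = 0xE000 = 57344 and [SEP] = 0xE001 = 57345 wrap the raw code points of "hello".
print(encoding["input_ids"])  # [57344, 104, 101, 108, 108, 111, 57345]
```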
venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/configuration_cvt.cpython-310.pyc ADDED
Binary file (6 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_cvt.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc ADDED
Binary file (34.3 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__init__.py ADDED
@@ -0,0 +1,144 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_layoutlmv3": [
29
+ "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
30
+ "LayoutLMv3Config",
31
+ "LayoutLMv3OnnxConfig",
32
+ ],
33
+ "processing_layoutlmv3": ["LayoutLMv3Processor"],
34
+ "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
35
+ }
36
+
37
+ try:
38
+ if not is_tokenizers_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ _import_structure["modeling_layoutlmv3"] = [
52
+ "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
53
+ "LayoutLMv3ForQuestionAnswering",
54
+ "LayoutLMv3ForSequenceClassification",
55
+ "LayoutLMv3ForTokenClassification",
56
+ "LayoutLMv3Model",
57
+ "LayoutLMv3PreTrainedModel",
58
+ ]
59
+
60
+ try:
61
+ if not is_tf_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_tf_layoutlmv3"] = [
67
+ "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
68
+ "TFLayoutLMv3ForQuestionAnswering",
69
+ "TFLayoutLMv3ForSequenceClassification",
70
+ "TFLayoutLMv3ForTokenClassification",
71
+ "TFLayoutLMv3Model",
72
+ "TFLayoutLMv3PreTrainedModel",
73
+ ]
74
+
75
+ try:
76
+ if not is_vision_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
82
+ _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
83
+
84
+
85
+ if TYPE_CHECKING:
86
+ from .configuration_layoutlmv3 import (
87
+ LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
88
+ LayoutLMv3Config,
89
+ LayoutLMv3OnnxConfig,
90
+ )
91
+ from .processing_layoutlmv3 import LayoutLMv3Processor
92
+ from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
93
+
94
+ try:
95
+ if not is_tokenizers_available():
96
+ raise OptionalDependencyNotAvailable()
97
+ except OptionalDependencyNotAvailable:
98
+ pass
99
+ else:
100
+ from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
101
+
102
+ try:
103
+ if not is_torch_available():
104
+ raise OptionalDependencyNotAvailable()
105
+ except OptionalDependencyNotAvailable:
106
+ pass
107
+ else:
108
+ from .modeling_layoutlmv3 import (
109
+ LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
110
+ LayoutLMv3ForQuestionAnswering,
111
+ LayoutLMv3ForSequenceClassification,
112
+ LayoutLMv3ForTokenClassification,
113
+ LayoutLMv3Model,
114
+ LayoutLMv3PreTrainedModel,
115
+ )
116
+
117
+ try:
118
+ if not is_tf_available():
119
+ raise OptionalDependencyNotAvailable()
120
+ except OptionalDependencyNotAvailable:
121
+ pass
122
+ else:
123
+ from .modeling_tf_layoutlmv3 import (
124
+ TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
125
+ TFLayoutLMv3ForQuestionAnswering,
126
+ TFLayoutLMv3ForSequenceClassification,
127
+ TFLayoutLMv3ForTokenClassification,
128
+ TFLayoutLMv3Model,
129
+ TFLayoutLMv3PreTrainedModel,
130
+ )
131
+
132
+ try:
133
+ if not is_vision_available():
134
+ raise OptionalDependencyNotAvailable()
135
+ except OptionalDependencyNotAvailable:
136
+ pass
137
+ else:
138
+ from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
139
+ from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
140
+
141
+ else:
142
+ import sys
143
+
144
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
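A small sketch of how the lazy module above resolves symbols on first access; it assumes a PyTorch install, since the TF- and vision-only symbols are only registered when their backends are available:

```python
# Importing through the package triggers the _LazyModule lookup defined above.
from transformers.models.layoutlmv3 import LayoutLMv3Config, LayoutLMv3Model

config = LayoutLMv3Config()
model = LayoutLMv3Model(config)  # randomly initialized from the default configuration
print(model.config.model_type)  # "layoutlmv3"
```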
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.23 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc ADDED
Binary file (42.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc ADDED
Binary file (51.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc ADDED
Binary file (7.23 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3.cpython-310.pyc ADDED
Binary file (47.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc ADDED
Binary file (22.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/configuration_layoutlmv3.py ADDED
@@ -0,0 +1,293 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LayoutLMv3 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import TYPE_CHECKING, Any, Mapping, Optional
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...onnx.utils import compute_effective_axis_dimension
25
+ from ...utils import logging
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from ...processing_utils import ProcessorMixin
30
+ from ...utils import TensorType
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ from ..deprecated._archive_maps import LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
37
+
38
+
39
+ class LayoutLMv3Config(PretrainedConfig):
40
+ r"""
41
+ This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an
42
+ LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a
43
+ configuration with the defaults will yield a similar configuration to that of the LayoutLMv3
44
+ [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture.
45
+
46
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
47
+ documentation from [`PretrainedConfig`] for more information.
48
+
49
+ Args:
50
+ vocab_size (`int`, *optional*, defaults to 50265):
51
+ Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by
52
+ the `inputs_ids` passed when calling [`LayoutLMv3Model`].
53
+ hidden_size (`int`, *optional*, defaults to 768):
54
+ Dimension of the encoder layers and the pooler layer.
55
+ num_hidden_layers (`int`, *optional*, defaults to 12):
56
+ Number of hidden layers in the Transformer encoder.
57
+ num_attention_heads (`int`, *optional*, defaults to 12):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ intermediate_size (`int`, *optional*, defaults to 3072):
60
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
61
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
62
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
63
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
64
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
65
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
66
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
67
+ The dropout ratio for the attention probabilities.
68
+ max_position_embeddings (`int`, *optional*, defaults to 512):
69
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
70
+ just in case (e.g., 512 or 1024 or 2048).
71
+ type_vocab_size (`int`, *optional*, defaults to 2):
72
+ The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`].
73
+ initializer_range (`float`, *optional*, defaults to 0.02):
74
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
75
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
76
+ The epsilon used by the layer normalization layers.
77
+ max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
78
+ The maximum value that the 2D position embedding might ever be used with. Typically set this to something
79
+ large just in case (e.g., 1024).
80
+ coordinate_size (`int`, *optional*, defaults to `128`):
81
+ Dimension of the coordinate embeddings.
82
+ shape_size (`int`, *optional*, defaults to `128`):
83
+ Dimension of the width and height embeddings.
84
+ has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
85
+ Whether or not to use a relative attention bias in the self-attention mechanism.
86
+ rel_pos_bins (`int`, *optional*, defaults to 32):
87
+ The number of relative position bins to be used in the self-attention mechanism.
88
+ max_rel_pos (`int`, *optional*, defaults to 128):
89
+ The maximum number of relative positions to be used in the self-attention mechanism.
90
+ max_rel_2d_pos (`int`, *optional*, defaults to 256):
91
+ The maximum number of relative 2D positions in the self-attention mechanism.
92
+ rel_2d_pos_bins (`int`, *optional*, defaults to 64):
93
+ The number of 2D relative position bins in the self-attention mechanism.
94
+ has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
95
+ Whether or not to use a spatial attention bias in the self-attention mechanism.
96
+ visual_embed (`bool`, *optional*, defaults to `True`):
97
+ Whether or not to add patch embeddings.
98
+ input_size (`int`, *optional*, defaults to `224`):
99
+ The size (resolution) of the images.
100
+ num_channels (`int`, *optional*, defaults to `3`):
101
+ The number of channels of the images.
102
+ patch_size (`int`, *optional*, defaults to `16`):
103
+ The size (resolution) of the patches.
104
+ classifier_dropout (`float`, *optional*):
105
+ The dropout ratio for the classification head.
106
+
107
+ Example:
108
+
109
+ ```python
110
+ >>> from transformers import LayoutLMv3Config, LayoutLMv3Model
111
+
112
+ >>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration
113
+ >>> configuration = LayoutLMv3Config()
114
+
115
+ >>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration
116
+ >>> model = LayoutLMv3Model(configuration)
117
+
118
+ >>> # Accessing the model configuration
119
+ >>> configuration = model.config
120
+ ```"""
121
+
122
+ model_type = "layoutlmv3"
123
+
124
+ def __init__(
125
+ self,
126
+ vocab_size=50265,
127
+ hidden_size=768,
128
+ num_hidden_layers=12,
129
+ num_attention_heads=12,
130
+ intermediate_size=3072,
131
+ hidden_act="gelu",
132
+ hidden_dropout_prob=0.1,
133
+ attention_probs_dropout_prob=0.1,
134
+ max_position_embeddings=512,
135
+ type_vocab_size=2,
136
+ initializer_range=0.02,
137
+ layer_norm_eps=1e-5,
138
+ pad_token_id=1,
139
+ bos_token_id=0,
140
+ eos_token_id=2,
141
+ max_2d_position_embeddings=1024,
142
+ coordinate_size=128,
143
+ shape_size=128,
144
+ has_relative_attention_bias=True,
145
+ rel_pos_bins=32,
146
+ max_rel_pos=128,
147
+ rel_2d_pos_bins=64,
148
+ max_rel_2d_pos=256,
149
+ has_spatial_attention_bias=True,
150
+ text_embed=True,
151
+ visual_embed=True,
152
+ input_size=224,
153
+ num_channels=3,
154
+ patch_size=16,
155
+ classifier_dropout=None,
156
+ **kwargs,
157
+ ):
158
+ super().__init__(
159
+ vocab_size=vocab_size,
160
+ hidden_size=hidden_size,
161
+ num_hidden_layers=num_hidden_layers,
162
+ num_attention_heads=num_attention_heads,
163
+ intermediate_size=intermediate_size,
164
+ hidden_act=hidden_act,
165
+ hidden_dropout_prob=hidden_dropout_prob,
166
+ attention_probs_dropout_prob=attention_probs_dropout_prob,
167
+ max_position_embeddings=max_position_embeddings,
168
+ type_vocab_size=type_vocab_size,
169
+ initializer_range=initializer_range,
170
+ layer_norm_eps=layer_norm_eps,
171
+ pad_token_id=pad_token_id,
172
+ bos_token_id=bos_token_id,
173
+ eos_token_id=eos_token_id,
174
+ **kwargs,
175
+ )
176
+ self.max_2d_position_embeddings = max_2d_position_embeddings
177
+ self.coordinate_size = coordinate_size
178
+ self.shape_size = shape_size
179
+ self.has_relative_attention_bias = has_relative_attention_bias
180
+ self.rel_pos_bins = rel_pos_bins
181
+ self.max_rel_pos = max_rel_pos
182
+ self.has_spatial_attention_bias = has_spatial_attention_bias
183
+ self.rel_2d_pos_bins = rel_2d_pos_bins
184
+ self.max_rel_2d_pos = max_rel_2d_pos
185
+ self.text_embed = text_embed
186
+ self.visual_embed = visual_embed
187
+ self.input_size = input_size
188
+ self.num_channels = num_channels
189
+ self.patch_size = patch_size
190
+ self.classifier_dropout = classifier_dropout
191
+
192
+
193
+ class LayoutLMv3OnnxConfig(OnnxConfig):
194
+ torch_onnx_minimum_version = version.parse("1.12")
195
+
196
+ @property
197
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
198
+ # The order of inputs is different for question answering and sequence classification
199
+ if self.task in ["question-answering", "sequence-classification"]:
200
+ return OrderedDict(
201
+ [
202
+ ("input_ids", {0: "batch", 1: "sequence"}),
203
+ ("attention_mask", {0: "batch", 1: "sequence"}),
204
+ ("bbox", {0: "batch", 1: "sequence"}),
205
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
206
+ ]
207
+ )
208
+ else:
209
+ return OrderedDict(
210
+ [
211
+ ("input_ids", {0: "batch", 1: "sequence"}),
212
+ ("bbox", {0: "batch", 1: "sequence"}),
213
+ ("attention_mask", {0: "batch", 1: "sequence"}),
214
+ ("pixel_values", {0: "batch", 1: "num_channels"}),
215
+ ]
216
+ )
217
+
218
+ @property
219
+ def atol_for_validation(self) -> float:
220
+ return 1e-5
221
+
222
+ @property
223
+ def default_onnx_opset(self) -> int:
224
+ return 12
225
+
226
+ def generate_dummy_inputs(
227
+ self,
228
+ processor: "ProcessorMixin",
229
+ batch_size: int = -1,
230
+ seq_length: int = -1,
231
+ is_pair: bool = False,
232
+ framework: Optional["TensorType"] = None,
233
+ num_channels: int = 3,
234
+ image_width: int = 40,
235
+ image_height: int = 40,
236
+ ) -> Mapping[str, Any]:
237
+ """
238
+ Generate inputs to provide to the ONNX exporter for the specific framework
239
+
240
+ Args:
241
+ processor ([`ProcessorMixin`]):
242
+ The processor associated with this model configuration.
243
+ batch_size (`int`, *optional*, defaults to -1):
244
+ The batch size to export the model for (-1 means dynamic axis).
245
+ seq_length (`int`, *optional*, defaults to -1):
246
+ The sequence length to export the model for (-1 means dynamic axis).
247
+ is_pair (`bool`, *optional*, defaults to `False`):
248
+ Indicate if the input is a pair (sentence 1, sentence 2).
249
+ framework (`TensorType`, *optional*, defaults to `None`):
250
+ The framework (PyTorch or TensorFlow) that the processor will generate tensors for.
251
+ num_channels (`int`, *optional*, defaults to 3):
252
+ The number of channels of the generated images.
253
+ image_width (`int`, *optional*, defaults to 40):
254
+ The width of the generated images.
255
+ image_height (`int`, *optional*, defaults to 40):
256
+ The height of the generated images.
257
+
258
+ Returns:
259
+ Mapping[str, Any]: holding the kwargs to provide to the model's forward function
260
+ """
261
+
262
+ # A dummy image is used so OCR should not be applied
263
+ setattr(processor.image_processor, "apply_ocr", False)
264
+
265
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
266
+ batch_size = compute_effective_axis_dimension(
267
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
268
+ )
269
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
270
+ token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
271
+ seq_length = compute_effective_axis_dimension(
272
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
273
+ )
274
+ # Generate dummy inputs according to compute batch and sequence
275
+ dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
276
+
277
+ # Generate dummy bounding boxes
278
+ dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
279
+
280
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
281
+ # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
282
+ dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
283
+
284
+ inputs = dict(
285
+ processor(
286
+ dummy_image,
287
+ text=dummy_text,
288
+ boxes=dummy_bboxes,
289
+ return_tensors=framework,
290
+ )
291
+ )
292
+
293
+ return inputs
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py ADDED
@@ -0,0 +1,35 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for LayoutLMv3.
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...utils import logging
22
+ from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class LayoutLMv3FeatureExtractor(LayoutLMv3ImageProcessor):
29
+ def __init__(self, *args, **kwargs) -> None:
30
+ warnings.warn(
31
+ "The class LayoutLMv3FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
32
+ " Please use LayoutLMv3ImageProcessor instead.",
33
+ FutureWarning,
34
+ )
35
+ super().__init__(*args, **kwargs)
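A short sketch confirming the deprecation shim's behavior: instantiating the feature extractor emits a `FutureWarning` and yields an instance of the image processor it subclasses:

```python
import warnings

from transformers import LayoutLMv3FeatureExtractor, LayoutLMv3ImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = LayoutLMv3FeatureExtractor()  # triggers the FutureWarning above

print(isinstance(feature_extractor, LayoutLMv3ImageProcessor))  # True: the class is a thin subclass
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True: the deprecation warning fired
```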
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/image_processing_layoutlmv3.py ADDED
@@ -0,0 +1,387 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LayoutLMv3."""
16
+
17
+ from typing import Dict, Iterable, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import resize, to_channel_dimension_format, to_pil_image
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ infer_channel_dimension_format,
30
+ is_scaled_image,
31
+ make_list_of_images,
32
+ to_numpy_array,
33
+ valid_images,
34
+ validate_kwargs,
35
+ validate_preprocess_arguments,
36
+ )
37
+ from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
38
+
39
+
40
+ if is_vision_available():
41
+ import PIL
42
+
43
+ # soft dependency
44
+ if is_pytesseract_available():
45
+ import pytesseract
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ def normalize_box(box, width, height):
51
+ return [
52
+ int(1000 * (box[0] / width)),
53
+ int(1000 * (box[1] / height)),
54
+ int(1000 * (box[2] / width)),
55
+ int(1000 * (box[3] / height)),
56
+ ]
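For reference, a quick check of `normalize_box` with made-up pixel coordinates; boxes are rescaled into the 0-1000 range used by LayoutLMv3's 2D position embeddings:

```python
from transformers.models.layoutlmv3.image_processing_layoutlmv3 import normalize_box

# (x0, y0, x1, y1) pixel coordinates on a 1000x2000 page map to thousandths of the page size.
print(normalize_box([10, 20, 110, 220], width=1000, height=2000))  # [10, 10, 110, 110]
```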
57
+
58
+
59
+ def apply_tesseract(
60
+ image: np.ndarray,
61
+ lang: Optional[str],
62
+ tesseract_config: Optional[str],
63
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
64
+ ):
65
+ """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
66
+
67
+ # apply OCR
68
+ pil_image = to_pil_image(image, input_data_format=input_data_format)
69
+ image_width, image_height = pil_image.size
70
+ data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
71
+ words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
72
+
73
+ # filter empty words and corresponding coordinates
74
+ irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
75
+ words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
76
+ left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
77
+ top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
78
+ width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
79
+ height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
80
+
81
+ # turn coordinates into (left, top, left+width, top+height) format
82
+ actual_boxes = []
83
+ for x, y, w, h in zip(left, top, width, height):
84
+ actual_box = [x, y, x + w, y + h]
85
+ actual_boxes.append(actual_box)
86
+
87
+ # finally, normalize the bounding boxes
88
+ normalized_boxes = []
89
+ for box in actual_boxes:
90
+ normalized_boxes.append(normalize_box(box, image_width, image_height))
91
+
92
+ assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
93
+
94
+ return words, normalized_boxes
95
+
96
+
97
+ class LayoutLMv3ImageProcessor(BaseImageProcessor):
98
+ r"""
99
+ Constructs a LayoutLMv3 image processor.
100
+
101
+ Args:
102
+ do_resize (`bool`, *optional*, defaults to `True`):
103
+ Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be
104
+ overridden by `do_resize` in `preprocess`.
105
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
106
+ Size of the image after resizing. Can be overridden by `size` in `preprocess`.
107
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
108
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
109
+ do_rescale (`bool`, *optional*, defaults to `True`):
110
+ Whether to rescale the image's pixel values by the specified `rescale_value`. Can be overridden by
111
+ `do_rescale` in `preprocess`.
112
+ rescale_factor (`float`, *optional*, defaults to 1 / 255):
113
+ Value by which the image's pixel values are rescaled. Can be overridden by `rescale_factor` in
114
+ `preprocess`.
115
+ do_normalize (`bool`, *optional*, defaults to `True`):
116
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
117
+ method.
118
+ image_mean (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
119
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
120
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
121
+ image_std (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
122
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
123
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
124
+ apply_ocr (`bool`, *optional*, defaults to `True`):
125
+ Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
126
+ the `apply_ocr` parameter in the `preprocess` method.
127
+ ocr_lang (`str`, *optional*):
128
+ The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
129
+ used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
130
+ tesseract_config (`str`, *optional*):
131
+ Any additional custom configuration flags that are forwarded to the `config` parameter when calling
132
+ Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
133
+ `preprocess` method.
134
+ """
135
+
136
+ model_input_names = ["pixel_values"]
137
+
138
+ def __init__(
139
+ self,
140
+ do_resize: bool = True,
141
+ size: Dict[str, int] = None,
142
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
143
+ do_rescale: bool = True,
144
+ rescale_value: float = 1 / 255,
145
+ do_normalize: bool = True,
146
+ image_mean: Union[float, Iterable[float]] = None,
147
+ image_std: Union[float, Iterable[float]] = None,
148
+ apply_ocr: bool = True,
149
+ ocr_lang: Optional[str] = None,
150
+ tesseract_config: Optional[str] = "",
151
+ **kwargs,
152
+ ) -> None:
153
+ super().__init__(**kwargs)
154
+ size = size if size is not None else {"height": 224, "width": 224}
155
+ size = get_size_dict(size)
156
+
157
+ self.do_resize = do_resize
158
+ self.size = size
159
+ self.resample = resample
160
+ self.do_rescale = do_rescale
161
+ self.rescale_factor = rescale_value
162
+ self.do_normalize = do_normalize
163
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
164
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
165
+ self.apply_ocr = apply_ocr
166
+ self.ocr_lang = ocr_lang
167
+ self.tesseract_config = tesseract_config
168
+ self._valid_processor_keys = [
169
+ "images",
170
+ "do_resize",
171
+ "size",
172
+ "resample",
173
+ "do_rescale",
174
+ "rescale_factor",
175
+ "do_normalize",
176
+ "image_mean",
177
+ "image_std",
178
+ "apply_ocr",
179
+ "ocr_lang",
180
+ "tesseract_config",
181
+ "return_tensors",
182
+ "data_format",
183
+ "input_data_format",
184
+ ]
185
+
186
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
187
+ def resize(
188
+ self,
189
+ image: np.ndarray,
190
+ size: Dict[str, int],
191
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
192
+ data_format: Optional[Union[str, ChannelDimension]] = None,
193
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
194
+ **kwargs,
195
+ ) -> np.ndarray:
196
+ """
197
+ Resize an image to `(size["height"], size["width"])`.
198
+
199
+ Args:
200
+ image (`np.ndarray`):
201
+ Image to resize.
202
+ size (`Dict[str, int]`):
203
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
204
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
205
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
206
+ data_format (`ChannelDimension` or `str`, *optional*):
207
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
208
+ image is used. Can be one of:
209
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
210
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
211
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
212
+ input_data_format (`ChannelDimension` or `str`, *optional*):
213
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
214
+ from the input image. Can be one of:
215
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
216
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
217
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
218
+
219
+ Returns:
220
+ `np.ndarray`: The resized image.
221
+ """
222
+ size = get_size_dict(size)
223
+ if "height" not in size or "width" not in size:
224
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
225
+ output_size = (size["height"], size["width"])
226
+ return resize(
227
+ image,
228
+ size=output_size,
229
+ resample=resample,
230
+ data_format=data_format,
231
+ input_data_format=input_data_format,
232
+ **kwargs,
233
+ )
234
+
235
+ def preprocess(
236
+ self,
237
+ images: ImageInput,
238
+ do_resize: bool = None,
239
+ size: Dict[str, int] = None,
240
+ resample=None,
241
+ do_rescale: bool = None,
242
+ rescale_factor: float = None,
243
+ do_normalize: bool = None,
244
+ image_mean: Union[float, Iterable[float]] = None,
245
+ image_std: Union[float, Iterable[float]] = None,
246
+ apply_ocr: bool = None,
247
+ ocr_lang: Optional[str] = None,
248
+ tesseract_config: Optional[str] = None,
249
+ return_tensors: Optional[Union[str, TensorType]] = None,
250
+ data_format: ChannelDimension = ChannelDimension.FIRST,
251
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
252
+ **kwargs,
253
+ ) -> PIL.Image.Image:
254
+ """
255
+ Preprocess an image or batch of images.
256
+
257
+ Args:
258
+ images (`ImageInput`):
259
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
260
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
261
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
262
+ Whether to resize the image.
263
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
264
+ Desired size of the output image after applying `resize`.
265
+ resample (`int`, *optional*, defaults to `self.resample`):
266
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
267
+ Only has an effect if `do_resize` is set to `True`.
268
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
269
+ Whether to rescale the image pixel values between [0, 1].
270
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
271
+ Rescale factor to apply to the image pixel values. Only has an effect if `do_rescale` is set to `True`.
272
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
273
+ Whether to normalize the image.
274
+ image_mean (`float` or `Iterable[float]`, *optional*, defaults to `self.image_mean`):
275
+ Mean values to be used for normalization. Only has an effect if `do_normalize` is set to `True`.
276
+ image_std (`float` or `Iterable[float]`, *optional*, defaults to `self.image_std`):
277
+ Standard deviation values to be used for normalization. Only has an effect if `do_normalize` is set to
278
+ `True`.
279
+ apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`):
280
+ Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
281
+ ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`):
282
+ The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
283
+ used.
284
+ tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`):
285
+ Any additional custom configuration flags that are forwarded to the `config` parameter when calling
286
+ Tesseract.
287
+ return_tensors (`str` or `TensorType`, *optional*):
288
+ The type of tensors to return. Can be one of:
289
+ - Unset: Return a list of `np.ndarray`.
290
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
291
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
292
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
293
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
294
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
295
+ The channel dimension format for the output image. Can be one of:
296
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
297
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
298
+ input_data_format (`ChannelDimension` or `str`, *optional*):
299
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
300
+ from the input image. Can be one of:
301
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
302
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
303
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
304
+ """
305
+ do_resize = do_resize if do_resize is not None else self.do_resize
306
+ size = size if size is not None else self.size
307
+ size = get_size_dict(size)
308
+ resample = resample if resample is not None else self.resample
309
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
310
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
311
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
312
+ image_mean = image_mean if image_mean is not None else self.image_mean
313
+ image_std = image_std if image_std is not None else self.image_std
314
+ apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
315
+ ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
316
+ tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
317
+ images = make_list_of_images(images)
318
+
319
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
320
+
321
+ if not valid_images(images):
322
+ raise ValueError(
323
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
324
+ "torch.Tensor, tf.Tensor or jax.ndarray."
325
+ )
326
+ validate_preprocess_arguments(
327
+ do_rescale=do_rescale,
328
+ rescale_factor=rescale_factor,
329
+ do_normalize=do_normalize,
330
+ image_mean=image_mean,
331
+ image_std=image_std,
332
+ do_resize=do_resize,
333
+ size=size,
334
+ resample=resample,
335
+ )
336
+
337
+ # All transformations expect numpy arrays.
338
+ images = [to_numpy_array(image) for image in images]
339
+
340
+ if is_scaled_image(images[0]) and do_rescale:
341
+ logger.warning_once(
342
+ "It looks like you are trying to rescale already rescaled images. If the input"
343
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
344
+ )
345
+
346
+ if input_data_format is None:
347
+ # We assume that all images have the same channel dimension format.
348
+ input_data_format = infer_channel_dimension_format(images[0])
349
+
350
+ # Tesseract OCR to get words + normalized bounding boxes
351
+ if apply_ocr:
352
+ requires_backends(self, "pytesseract")
353
+ words_batch = []
354
+ boxes_batch = []
355
+ for image in images:
356
+ words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)
357
+ words_batch.append(words)
358
+ boxes_batch.append(boxes)
359
+
360
+ if do_resize:
361
+ images = [
362
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
363
+ for image in images
364
+ ]
365
+
366
+ if do_rescale:
367
+ images = [
368
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
369
+ for image in images
370
+ ]
371
+
372
+ if do_normalize:
373
+ images = [
374
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
375
+ for image in images
376
+ ]
377
+
378
+ images = [
379
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
380
+ ]
381
+
382
+ data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
383
+
384
+ if apply_ocr:
385
+ data["words"] = words_batch
386
+ data["boxes"] = boxes_batch
387
+ return data
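A minimal preprocessing sketch for the image processor above, with OCR disabled so pytesseract is not required; the random uint8 array stands in for a real document scan, and `return_tensors="pt"` assumes a PyTorch install:

```python
import numpy as np

from transformers import LayoutLMv3ImageProcessor

image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip the Tesseract OCR step

# A random "document" image in (height, width, channels) layout with pixel values in [0, 255].
image = np.random.randint(0, 256, size=(640, 480, 3), dtype=np.uint8)

encoding = image_processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) with the default size
```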
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_layoutlmv3.py ADDED
@@ -0,0 +1,1371 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch LayoutLMv3 model."""
16
+
17
+ import collections
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ QuestionAnsweringModelOutput,
31
+ SequenceClassifierOutput,
32
+ TokenClassifierOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import apply_chunking_to_forward
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
37
+ from .configuration_layoutlmv3 import LayoutLMv3Config
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CONFIG_FOR_DOC = "LayoutLMv3Config"
43
+
44
+
45
+ from ..deprecated._archive_maps import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
46
+
47
+
48
+ LAYOUTLMV3_START_DOCSTRING = r"""
49
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
50
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
51
+ behavior.
52
+
53
+ Parameters:
54
+ config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.
55
+ Initializing with a config file does not load the weights associated with the model, only the
56
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
57
+ """
58
+
59
+ LAYOUTLMV3_MODEL_INPUTS_DOCSTRING = r"""
60
+ Args:
61
+ input_ids (`torch.LongTensor` of shape `({0})`):
62
+ Indices of input sequence tokens in the vocabulary.
63
+
64
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
65
+ token. See `pixel_values` for `patch_sequence_length`.
66
+
67
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
68
+ [`PreTrainedTokenizer.__call__`] for details.
69
+
70
+ [What are input IDs?](../glossary#input-ids)
71
+
72
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
73
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
74
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
75
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
76
+ y1) represents the position of the lower right corner.
77
+
78
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
79
+ token. See `pixel_values` for `patch_sequence_length`.
80
+
81
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
82
+ Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
83
+ config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
84
+ config.patch_size) * (width / config.patch_size))`.
85
+
86
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
87
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
88
+
89
+ - 1 for tokens that are **not masked**,
90
+ - 0 for tokens that are **masked**.
91
+
92
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
93
+ token. See `pixel_values` for `patch_sequence_length`.
94
+
95
+ [What are attention masks?](../glossary#attention-mask)
96
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
97
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
98
+ 1]`:
99
+
100
+ - 0 corresponds to a *sentence A* token,
101
+ - 1 corresponds to a *sentence B* token.
102
+
103
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
104
+ token. See `pixel_values` for `patch_sequence_length`.
105
+
106
+ [What are token type IDs?](../glossary#token-type-ids)
107
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
108
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
109
+ config.max_position_embeddings - 1]`.
110
+
111
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
112
+ token. See `pixel_values` for `patch_sequence_length`.
113
+
114
+ [What are position IDs?](../glossary#position-ids)
115
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
116
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
117
+
118
+ - 1 indicates the head is **not masked**,
119
+ - 0 indicates the head is **masked**.
120
+
121
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
122
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
123
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
124
+ model's internal embedding lookup matrix.
125
+ output_attentions (`bool`, *optional*):
126
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
127
+ tensors for more detail.
128
+ output_hidden_states (`bool`, *optional*):
129
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
130
+ more detail.
131
+ return_dict (`bool`, *optional*):
132
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
133
+ """
134
+
135
+ LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING = r"""
136
+ Args:
137
+ input_ids (`torch.LongTensor` of shape `({0})`):
138
+ Indices of input sequence tokens in the vocabulary.
139
+
140
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
141
+ [`PreTrainedTokenizer.__call__`] for details.
142
+
143
+ [What are input IDs?](../glossary#input-ids)
144
+
145
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
146
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
147
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
148
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
149
+ y1) represents the position of the lower right corner.
150
+
151
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
152
+ Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
153
+ config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
154
+ config.patch_size) * (width / config.patch_size))`.
155
+
156
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
157
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
158
+
159
+ - 1 for tokens that are **not masked**,
160
+ - 0 for tokens that are **masked**.
161
+
162
+ [What are attention masks?](../glossary#attention-mask)
163
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
164
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
165
+ 1]`:
166
+
167
+ - 0 corresponds to a *sentence A* token,
168
+ - 1 corresponds to a *sentence B* token.
169
+
170
+ [What are token type IDs?](../glossary#token-type-ids)
171
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
172
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
173
+ config.max_position_embeddings - 1]`.
174
+
175
+ [What are position IDs?](../glossary#position-ids)
176
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
177
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
178
+
179
+ - 1 indicates the head is **not masked**,
180
+ - 0 indicates the head is **masked**.
181
+
182
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
183
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
184
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
185
+ model's internal embedding lookup matrix.
186
+ output_attentions (`bool`, *optional*):
187
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
188
+ tensors for more detail.
189
+ output_hidden_states (`bool`, *optional*):
190
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
191
+ more detail.
192
+ return_dict (`bool`, *optional*):
193
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
194
+ """
195
+
196
+
197
+ class LayoutLMv3PatchEmbeddings(nn.Module):
198
+ """LayoutLMv3 image (patch) embeddings. This class also automatically interpolates the position embeddings for varying
199
+ image sizes."""
200
+
201
+ def __init__(self, config):
202
+ super().__init__()
203
+
204
+ image_size = (
205
+ config.input_size
206
+ if isinstance(config.input_size, collections.abc.Iterable)
207
+ else (config.input_size, config.input_size)
208
+ )
209
+ patch_size = (
210
+ config.patch_size
211
+ if isinstance(config.patch_size, collections.abc.Iterable)
212
+ else (config.patch_size, config.patch_size)
213
+ )
214
+ self.patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
215
+ self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)
216
+
217
+ def forward(self, pixel_values, position_embedding=None):
218
+ embeddings = self.proj(pixel_values)
219
+
220
+ if position_embedding is not None:
221
+ # interpolate the position embedding to the corresponding size
222
+ position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1)
223
+ position_embedding = position_embedding.permute(0, 3, 1, 2)
224
+ patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
225
+ position_embedding = F.interpolate(position_embedding, size=(patch_height, patch_width), mode="bicubic")
226
+ embeddings = embeddings + position_embedding
227
+
228
+ embeddings = embeddings.flatten(2).transpose(1, 2)
229
+ return embeddings
230
+
231
+
232
+ class LayoutLMv3TextEmbeddings(nn.Module):
233
+ """
234
+ LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.
235
+ """
236
+
237
+ def __init__(self, config):
238
+ super().__init__()
239
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
240
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
241
+
242
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
243
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
244
+
245
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
246
+ self.register_buffer(
247
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
248
+ )
249
+
250
+ self.padding_idx = config.pad_token_id
251
+ self.position_embeddings = nn.Embedding(
252
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
253
+ )
254
+
255
+ self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
256
+ self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
257
+ self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
258
+ self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
259
+
260
+ def calculate_spatial_position_embeddings(self, bbox):
261
+ try:
262
+ left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
263
+ upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
264
+ right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
265
+ lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
266
+ except IndexError as e:
267
+ raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
268
+
269
+ h_position_embeddings = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023))
270
+ w_position_embeddings = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023))
271
+
272
+ # below is the difference between LayoutLMEmbeddingsV2 (torch.cat) and LayoutLMEmbeddingsV1 (add)
273
+ spatial_position_embeddings = torch.cat(
274
+ [
275
+ left_position_embeddings,
276
+ upper_position_embeddings,
277
+ right_position_embeddings,
278
+ lower_position_embeddings,
279
+ h_position_embeddings,
280
+ w_position_embeddings,
281
+ ],
282
+ dim=-1,
283
+ )
284
+ return spatial_position_embeddings
285
+
286
+ def create_position_ids_from_input_ids(self, input_ids, padding_idx):
287
+ """
288
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
289
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
290
+ """
291
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
292
+ mask = input_ids.ne(padding_idx).int()
293
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
294
+ return incremental_indices.long() + padding_idx
295
+
296
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
297
+ """
298
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
299
+ """
300
+ input_shape = inputs_embeds.size()[:-1]
301
+ sequence_length = input_shape[1]
302
+
303
+ position_ids = torch.arange(
304
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
305
+ )
306
+ return position_ids.unsqueeze(0).expand(input_shape)
307
+
308
+ def forward(
309
+ self,
310
+ input_ids=None,
311
+ bbox=None,
312
+ token_type_ids=None,
313
+ position_ids=None,
314
+ inputs_embeds=None,
315
+ ):
316
+ if position_ids is None:
317
+ if input_ids is not None:
318
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
319
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
320
+ input_ids.device
321
+ )
322
+ else:
323
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
324
+
325
+ if input_ids is not None:
326
+ input_shape = input_ids.size()
327
+ else:
328
+ input_shape = inputs_embeds.size()[:-1]
329
+
330
+ if token_type_ids is None:
331
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
332
+
333
+ if inputs_embeds is None:
334
+ inputs_embeds = self.word_embeddings(input_ids)
335
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
336
+
337
+ embeddings = inputs_embeds + token_type_embeddings
338
+ position_embeddings = self.position_embeddings(position_ids)
339
+ embeddings += position_embeddings
340
+
341
+ spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox)
342
+
343
+ embeddings = embeddings + spatial_position_embeddings
344
+
345
+ embeddings = self.LayerNorm(embeddings)
346
+ embeddings = self.dropout(embeddings)
347
+ return embeddings
348
+
349
+
350
+ class LayoutLMv3PreTrainedModel(PreTrainedModel):
351
+ """
352
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
353
+ models.
354
+ """
355
+
356
+ config_class = LayoutLMv3Config
357
+ base_model_prefix = "layoutlmv3"
358
+
359
+ def _init_weights(self, module):
360
+ """Initialize the weights"""
361
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
362
+ # Slightly different from the TF version which uses truncated_normal for initialization
363
+ # cf https://github.com/pytorch/pytorch/pull/5617
364
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
365
+ if module.bias is not None:
366
+ module.bias.data.zero_()
367
+ elif isinstance(module, nn.Embedding):
368
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
369
+ if module.padding_idx is not None:
370
+ module.weight.data[module.padding_idx].zero_()
371
+ elif isinstance(module, nn.LayerNorm):
372
+ module.bias.data.zero_()
373
+ module.weight.data.fill_(1.0)
374
+
375
+
376
+ class LayoutLMv3SelfAttention(nn.Module):
377
+ def __init__(self, config):
378
+ super().__init__()
379
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
380
+ raise ValueError(
381
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
382
+ f"heads ({config.num_attention_heads})"
383
+ )
384
+
385
+ self.num_attention_heads = config.num_attention_heads
386
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
387
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
388
+
389
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
390
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
391
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
392
+
393
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
394
+ self.has_relative_attention_bias = config.has_relative_attention_bias
395
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
396
+
397
+ def transpose_for_scores(self, x):
398
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
399
+ x = x.view(*new_x_shape)
400
+ return x.permute(0, 2, 1, 3)
401
+
402
+ def cogview_attention(self, attention_scores, alpha=32):
403
+ """
404
+ https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
405
+ (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). Seems the new attention_probs
406
+ will result in a slower speed and a little bias. Can use torch.allclose(standard_attention_probs,
407
+ cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.
408
+ """
409
+ scaled_attention_scores = attention_scores / alpha
410
+ max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
411
+ new_attention_scores = (scaled_attention_scores - max_value) * alpha
412
+ return nn.Softmax(dim=-1)(new_attention_scores)
413
+
414
+ def forward(
415
+ self,
416
+ hidden_states,
417
+ attention_mask=None,
418
+ head_mask=None,
419
+ output_attentions=False,
420
+ rel_pos=None,
421
+ rel_2d_pos=None,
422
+ ):
423
+ mixed_query_layer = self.query(hidden_states)
424
+
425
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
426
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
427
+ query_layer = self.transpose_for_scores(mixed_query_layer)
428
+
429
+ # Take the dot product between "query" and "key" to get the raw attention scores.
430
+ # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow.
431
+ # Changing the computational order into QT(K/√d) alleviates the problem. (https://arxiv.org/pdf/2105.13290.pdf)
432
+ attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
433
+
434
+ if self.has_relative_attention_bias and self.has_spatial_attention_bias:
435
+ attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size)
436
+ elif self.has_relative_attention_bias:
437
+ attention_scores += rel_pos / math.sqrt(self.attention_head_size)
438
+
439
+ if attention_mask is not None:
440
+ # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
441
+ attention_scores = attention_scores + attention_mask
442
+
443
+ # Normalize the attention scores to probabilities.
444
+ # Use the trick of the CogView paper to stablize training
445
+ attention_probs = self.cogview_attention(attention_scores)
446
+
447
+ # This is actually dropping out entire tokens to attend to, which might
448
+ # seem a bit unusual, but is taken from the original Transformer paper.
449
+ attention_probs = self.dropout(attention_probs)
450
+
451
+ # Mask heads if we want to
452
+ if head_mask is not None:
453
+ attention_probs = attention_probs * head_mask
454
+
455
+ context_layer = torch.matmul(attention_probs, value_layer)
456
+
457
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
458
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
459
+ context_layer = context_layer.view(*new_context_layer_shape)
460
+
461
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
462
+
463
+ return outputs
464
+
465
+
466
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput
467
+ class LayoutLMv3SelfOutput(nn.Module):
468
+ def __init__(self, config):
469
+ super().__init__()
470
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
471
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
472
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
473
+
474
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
475
+ hidden_states = self.dense(hidden_states)
476
+ hidden_states = self.dropout(hidden_states)
477
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
478
+ return hidden_states
479
+
480
+
481
+ # Copied from transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Attention with LayoutLMv2->LayoutLMv3
482
+ class LayoutLMv3Attention(nn.Module):
483
+ def __init__(self, config):
484
+ super().__init__()
485
+ self.self = LayoutLMv3SelfAttention(config)
486
+ self.output = LayoutLMv3SelfOutput(config)
487
+
488
+ def forward(
489
+ self,
490
+ hidden_states,
491
+ attention_mask=None,
492
+ head_mask=None,
493
+ output_attentions=False,
494
+ rel_pos=None,
495
+ rel_2d_pos=None,
496
+ ):
497
+ self_outputs = self.self(
498
+ hidden_states,
499
+ attention_mask,
500
+ head_mask,
501
+ output_attentions,
502
+ rel_pos=rel_pos,
503
+ rel_2d_pos=rel_2d_pos,
504
+ )
505
+ attention_output = self.output(self_outputs[0], hidden_states)
506
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
507
+ return outputs
508
+
509
+
510
+ # Copied from transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Layer with LayoutLMv2->LayoutLMv3
511
+ class LayoutLMv3Layer(nn.Module):
512
+ def __init__(self, config):
513
+ super().__init__()
514
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
515
+ self.seq_len_dim = 1
516
+ self.attention = LayoutLMv3Attention(config)
517
+ self.intermediate = LayoutLMv3Intermediate(config)
518
+ self.output = LayoutLMv3Output(config)
519
+
520
+ def forward(
521
+ self,
522
+ hidden_states,
523
+ attention_mask=None,
524
+ head_mask=None,
525
+ output_attentions=False,
526
+ rel_pos=None,
527
+ rel_2d_pos=None,
528
+ ):
529
+ self_attention_outputs = self.attention(
530
+ hidden_states,
531
+ attention_mask,
532
+ head_mask,
533
+ output_attentions=output_attentions,
534
+ rel_pos=rel_pos,
535
+ rel_2d_pos=rel_2d_pos,
536
+ )
537
+ attention_output = self_attention_outputs[0]
538
+
539
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
540
+
541
+ layer_output = apply_chunking_to_forward(
542
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
543
+ )
544
+ outputs = (layer_output,) + outputs
545
+
546
+ return outputs
547
+
548
+ def feed_forward_chunk(self, attention_output):
549
+ intermediate_output = self.intermediate(attention_output)
550
+ layer_output = self.output(intermediate_output, attention_output)
551
+ return layer_output
552
+
553
+
554
+ class LayoutLMv3Encoder(nn.Module):
555
+ def __init__(self, config):
556
+ super().__init__()
557
+ self.config = config
558
+ self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)])
559
+ self.gradient_checkpointing = False
560
+
561
+ self.has_relative_attention_bias = config.has_relative_attention_bias
562
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
563
+
564
+ if self.has_relative_attention_bias:
565
+ self.rel_pos_bins = config.rel_pos_bins
566
+ self.max_rel_pos = config.max_rel_pos
567
+ self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False)
568
+
569
+ if self.has_spatial_attention_bias:
570
+ self.max_rel_2d_pos = config.max_rel_2d_pos
571
+ self.rel_2d_pos_bins = config.rel_2d_pos_bins
572
+ self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
573
+ self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
574
+
575
+ def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
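+ # T5-style bucketing: with bidirectional=True the sign of the offset selects the upper or
+ # lower half of the buckets; within each half, small offsets get one bucket each and larger
+ # offsets (up to max_distance) are grouped into logarithmically sized buckets.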
576
+ ret = 0
577
+ if bidirectional:
578
+ num_buckets //= 2
579
+ ret += (relative_position > 0).long() * num_buckets
580
+ n = torch.abs(relative_position)
581
+ else:
582
+ n = torch.max(-relative_position, torch.zeros_like(relative_position))
583
+ # now n is in the range [0, inf)
584
+
585
+ # half of the buckets are for exact increments in positions
586
+ max_exact = num_buckets // 2
587
+ is_small = n < max_exact
588
+
589
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
590
+ val_if_large = max_exact + (
591
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
592
+ ).to(torch.long)
593
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
594
+
595
+ ret += torch.where(is_small, n, val_if_large)
596
+ return ret
597
+
598
+ def _cal_1d_pos_emb(self, position_ids):
599
+ rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
600
+
601
+ rel_pos = self.relative_position_bucket(
602
+ rel_pos_mat,
603
+ num_buckets=self.rel_pos_bins,
604
+ max_distance=self.max_rel_pos,
605
+ )
606
+ rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2)
607
+ rel_pos = rel_pos.contiguous()
608
+ return rel_pos
609
+
610
+ def _cal_2d_pos_emb(self, bbox):
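+ # Spatial (2D) relative attention bias: pairwise differences of the left (x0) and lower (y1)
+ # box coordinates are bucketed separately, mapped to per-head biases via rel_pos_x_bias /
+ # rel_pos_y_bias, and summed.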
611
+ position_coord_x = bbox[:, :, 0]
612
+ position_coord_y = bbox[:, :, 3]
613
+ rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
614
+ rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
615
+ rel_pos_x = self.relative_position_bucket(
616
+ rel_pos_x_2d_mat,
617
+ num_buckets=self.rel_2d_pos_bins,
618
+ max_distance=self.max_rel_2d_pos,
619
+ )
620
+ rel_pos_y = self.relative_position_bucket(
621
+ rel_pos_y_2d_mat,
622
+ num_buckets=self.rel_2d_pos_bins,
623
+ max_distance=self.max_rel_2d_pos,
624
+ )
625
+ rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2)
626
+ rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2)
627
+ rel_pos_x = rel_pos_x.contiguous()
628
+ rel_pos_y = rel_pos_y.contiguous()
629
+ rel_2d_pos = rel_pos_x + rel_pos_y
630
+ return rel_2d_pos
631
+
632
+ def forward(
633
+ self,
634
+ hidden_states,
635
+ bbox=None,
636
+ attention_mask=None,
637
+ head_mask=None,
638
+ output_attentions=False,
639
+ output_hidden_states=False,
640
+ return_dict=True,
641
+ position_ids=None,
642
+ patch_height=None,
643
+ patch_width=None,
644
+ ):
645
+ all_hidden_states = () if output_hidden_states else None
646
+ all_self_attentions = () if output_attentions else None
647
+
648
+ rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None
649
+ rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None
650
+
651
+ for i, layer_module in enumerate(self.layer):
652
+ if output_hidden_states:
653
+ all_hidden_states = all_hidden_states + (hidden_states,)
654
+
655
+ layer_head_mask = head_mask[i] if head_mask is not None else None
656
+
657
+ if self.gradient_checkpointing and self.training:
658
+ layer_outputs = self._gradient_checkpointing_func(
659
+ layer_module.__call__,
660
+ hidden_states,
661
+ attention_mask,
662
+ layer_head_mask,
663
+ output_attentions,
664
+ rel_pos,
665
+ rel_2d_pos,
666
+ )
667
+ else:
668
+ layer_outputs = layer_module(
669
+ hidden_states,
670
+ attention_mask,
671
+ layer_head_mask,
672
+ output_attentions,
673
+ rel_pos=rel_pos,
674
+ rel_2d_pos=rel_2d_pos,
675
+ )
676
+
677
+ hidden_states = layer_outputs[0]
678
+ if output_attentions:
679
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
680
+
681
+ if output_hidden_states:
682
+ all_hidden_states = all_hidden_states + (hidden_states,)
683
+
684
+ if not return_dict:
685
+ return tuple(
686
+ v
687
+ for v in [
688
+ hidden_states,
689
+ all_hidden_states,
690
+ all_self_attentions,
691
+ ]
692
+ if v is not None
693
+ )
694
+ return BaseModelOutput(
695
+ last_hidden_state=hidden_states,
696
+ hidden_states=all_hidden_states,
697
+ attentions=all_self_attentions,
698
+ )
699
+
700
+
701
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate
702
+ class LayoutLMv3Intermediate(nn.Module):
703
+ def __init__(self, config):
704
+ super().__init__()
705
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
706
+ if isinstance(config.hidden_act, str):
707
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
708
+ else:
709
+ self.intermediate_act_fn = config.hidden_act
710
+
711
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
712
+ hidden_states = self.dense(hidden_states)
713
+ hidden_states = self.intermediate_act_fn(hidden_states)
714
+ return hidden_states
715
+
716
+
717
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaOutput
718
+ class LayoutLMv3Output(nn.Module):
719
+ def __init__(self, config):
720
+ super().__init__()
721
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
722
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
723
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
724
+
725
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
726
+ hidden_states = self.dense(hidden_states)
727
+ hidden_states = self.dropout(hidden_states)
728
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
729
+ return hidden_states
730
+
731
+
732
+ @add_start_docstrings(
733
+ "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.",
734
+ LAYOUTLMV3_START_DOCSTRING,
735
+ )
736
+ class LayoutLMv3Model(LayoutLMv3PreTrainedModel):
737
+ def __init__(self, config):
738
+ super().__init__(config)
739
+ self.config = config
740
+
741
+ if config.text_embed:
742
+ self.embeddings = LayoutLMv3TextEmbeddings(config)
743
+
744
+ if config.visual_embed:
745
+ # use the default pre-training parameters for fine-tuning (e.g., input_size)
746
+ # when the input_size is larger in fine-tuning, we will interpolate the position embeddings in forward
747
+ self.patch_embed = LayoutLMv3PatchEmbeddings(config)
748
+
749
+ size = int(config.input_size / config.patch_size)
750
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
751
+ self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, config.hidden_size))
752
+ self.pos_drop = nn.Dropout(p=0.0)
753
+
754
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
755
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
756
+
757
+ if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
758
+ self.init_visual_bbox(image_size=(size, size))
759
+
760
+ self.norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
761
+
762
+ self.encoder = LayoutLMv3Encoder(config)
763
+
764
+ self.init_weights()
765
+
766
+ def get_input_embeddings(self):
767
+ return self.embeddings.word_embeddings
768
+
769
+ def set_input_embeddings(self, value):
770
+ self.embeddings.word_embeddings = value
771
+
772
+ def _prune_heads(self, heads_to_prune):
773
+ """
774
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
775
+ class PreTrainedModel
776
+ """
777
+ for layer, heads in heads_to_prune.items():
778
+ self.encoder.layer[layer].attention.prune_heads(heads)
779
+
780
+ def init_visual_bbox(self, image_size=(14, 14), max_len=1000):
781
+ """
782
+ Create the bounding boxes for the visual (patch) tokens.
783
+ """
784
+ visual_bbox_x = torch.div(
785
+ torch.arange(0, max_len * (image_size[1] + 1), max_len), image_size[1], rounding_mode="trunc"
786
+ )
787
+ visual_bbox_y = torch.div(
788
+ torch.arange(0, max_len * (image_size[0] + 1), max_len), image_size[0], rounding_mode="trunc"
789
+ )
790
+ visual_bbox = torch.stack(
791
+ [
792
+ visual_bbox_x[:-1].repeat(image_size[0], 1),
793
+ visual_bbox_y[:-1].repeat(image_size[1], 1).transpose(0, 1),
794
+ visual_bbox_x[1:].repeat(image_size[0], 1),
795
+ visual_bbox_y[1:].repeat(image_size[1], 1).transpose(0, 1),
796
+ ],
797
+ dim=-1,
798
+ ).view(-1, 4)
799
+
800
+ cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]])
801
+ self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0)
802
+
803
+ def calculate_visual_bbox(self, device, dtype, batch_size):
804
+ visual_bbox = self.visual_bbox.repeat(batch_size, 1, 1)
805
+ visual_bbox = visual_bbox.to(device).type(dtype)
806
+ return visual_bbox
807
+
808
+ def forward_image(self, pixel_values):
809
+ embeddings = self.patch_embed(pixel_values)
810
+
811
+ # add [CLS] token
812
+ batch_size, seq_len, _ = embeddings.size()
813
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
814
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
815
+
816
+ # add position embeddings
817
+ if self.pos_embed is not None:
818
+ embeddings = embeddings + self.pos_embed
819
+
820
+ embeddings = self.pos_drop(embeddings)
821
+ embeddings = self.norm(embeddings)
822
+
823
+ return embeddings
824
+
825
+ @add_start_docstrings_to_model_forward(
826
+ LAYOUTLMV3_MODEL_INPUTS_DOCSTRING.format("batch_size, token_sequence_length")
827
+ )
828
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
829
+ def forward(
830
+ self,
831
+ input_ids: Optional[torch.LongTensor] = None,
832
+ bbox: Optional[torch.LongTensor] = None,
833
+ attention_mask: Optional[torch.FloatTensor] = None,
834
+ token_type_ids: Optional[torch.LongTensor] = None,
835
+ position_ids: Optional[torch.LongTensor] = None,
836
+ head_mask: Optional[torch.FloatTensor] = None,
837
+ inputs_embeds: Optional[torch.FloatTensor] = None,
838
+ pixel_values: Optional[torch.FloatTensor] = None,
839
+ output_attentions: Optional[bool] = None,
840
+ output_hidden_states: Optional[bool] = None,
841
+ return_dict: Optional[bool] = None,
842
+ ) -> Union[Tuple, BaseModelOutput]:
843
+ r"""
844
+ Returns:
845
+
846
+ Examples:
847
+
848
+ ```python
849
+ >>> from transformers import AutoProcessor, AutoModel
850
+ >>> from datasets import load_dataset
851
+
852
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
853
+ >>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
854
+
855
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
856
+ >>> example = dataset[0]
857
+ >>> image = example["image"]
858
+ >>> words = example["tokens"]
859
+ >>> boxes = example["bboxes"]
860
+
861
+ >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
862
+
863
+ >>> outputs = model(**encoding)
864
+ >>> last_hidden_states = outputs.last_hidden_state
865
+ ```"""
866
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
867
+ output_hidden_states = (
868
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
869
+ )
870
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
871
+
872
+ if input_ids is not None:
873
+ input_shape = input_ids.size()
874
+ batch_size, seq_length = input_shape
875
+ device = input_ids.device
876
+ elif inputs_embeds is not None:
877
+ input_shape = inputs_embeds.size()[:-1]
878
+ batch_size, seq_length = input_shape
879
+ device = inputs_embeds.device
880
+ elif pixel_values is not None:
881
+ batch_size = len(pixel_values)
882
+ device = pixel_values.device
883
+ else:
884
+ raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")
885
+
886
+ if input_ids is not None or inputs_embeds is not None:
887
+ if attention_mask is None:
888
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
889
+ if token_type_ids is None:
890
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
891
+ if bbox is None:
892
+ bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
893
+
894
+ embedding_output = self.embeddings(
895
+ input_ids=input_ids,
896
+ bbox=bbox,
897
+ position_ids=position_ids,
898
+ token_type_ids=token_type_ids,
899
+ inputs_embeds=inputs_embeds,
900
+ )
901
+
902
+ final_bbox = final_position_ids = None
903
+ patch_height = patch_width = None
904
+ if pixel_values is not None:
905
+ patch_height, patch_width = (
906
+ int(pixel_values.shape[2] / self.config.patch_size),
907
+ int(pixel_values.shape[3] / self.config.patch_size),
908
+ )
909
+ visual_embeddings = self.forward_image(pixel_values)
910
+ visual_attention_mask = torch.ones(
911
+ (batch_size, visual_embeddings.shape[1]), dtype=torch.long, device=device
912
+ )
913
+ if attention_mask is not None:
914
+ attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
915
+ else:
916
+ attention_mask = visual_attention_mask
917
+
918
+ if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
919
+ if self.config.has_spatial_attention_bias:
920
+ visual_bbox = self.calculate_visual_bbox(device, dtype=torch.long, batch_size=batch_size)
921
+ if bbox is not None:
922
+ final_bbox = torch.cat([bbox, visual_bbox], dim=1)
923
+ else:
924
+ final_bbox = visual_bbox
925
+
926
+ visual_position_ids = torch.arange(
927
+ 0, visual_embeddings.shape[1], dtype=torch.long, device=device
928
+ ).repeat(batch_size, 1)
929
+ if input_ids is not None or inputs_embeds is not None:
930
+ position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0)
931
+ position_ids = position_ids.expand(input_shape)
932
+ final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
933
+ else:
934
+ final_position_ids = visual_position_ids
935
+
936
+ if input_ids is not None or inputs_embeds is not None:
937
+ embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)
938
+ else:
939
+ embedding_output = visual_embeddings
940
+
941
+ embedding_output = self.LayerNorm(embedding_output)
942
+ embedding_output = self.dropout(embedding_output)
943
+ elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
944
+ if self.config.has_spatial_attention_bias:
945
+ final_bbox = bbox
946
+ if self.config.has_relative_attention_bias:
947
+ position_ids = self.embeddings.position_ids[:, : input_shape[1]]
948
+ position_ids = position_ids.expand_as(input_ids)
949
+ final_position_ids = position_ids
950
+
951
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
952
+ attention_mask, None, device, dtype=embedding_output.dtype
953
+ )
954
+
955
+ # Prepare head mask if needed
956
+ # 1.0 in head_mask indicate we keep the head
957
+ # attention_probs has shape bsz x n_heads x N x N
958
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
959
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
960
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
961
+
962
+ encoder_outputs = self.encoder(
963
+ embedding_output,
964
+ bbox=final_bbox,
965
+ position_ids=final_position_ids,
966
+ attention_mask=extended_attention_mask,
967
+ head_mask=head_mask,
968
+ output_attentions=output_attentions,
969
+ output_hidden_states=output_hidden_states,
970
+ return_dict=return_dict,
971
+ patch_height=patch_height,
972
+ patch_width=patch_width,
973
+ )
974
+
975
+ sequence_output = encoder_outputs[0]
976
+
977
+ if not return_dict:
978
+ return (sequence_output,) + encoder_outputs[1:]
979
+
980
+ return BaseModelOutput(
981
+ last_hidden_state=sequence_output,
982
+ hidden_states=encoder_outputs.hidden_states,
983
+ attentions=encoder_outputs.attentions,
984
+ )
985
+
986
+
987
+ class LayoutLMv3ClassificationHead(nn.Module):
988
+ """
989
+ Head for sentence-level classification tasks. Reference: RobertaClassificationHead
990
+ """
991
+
992
+ def __init__(self, config, pool_feature=False):
993
+ super().__init__()
994
+ self.pool_feature = pool_feature
995
+ if pool_feature:
996
+ self.dense = nn.Linear(config.hidden_size * 3, config.hidden_size)
997
+ else:
998
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
999
+ classifier_dropout = (
1000
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1001
+ )
1002
+ self.dropout = nn.Dropout(classifier_dropout)
1003
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1004
+
1005
+ def forward(self, x):
1006
+ x = self.dropout(x)
1007
+ x = self.dense(x)
1008
+ x = torch.tanh(x)
1009
+ x = self.dropout(x)
1010
+ x = self.out_proj(x)
1011
+ return x
1012
+
1013
+
1014
+ @add_start_docstrings(
1015
+ """
1016
+ LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.
1017
+ for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),
1018
+ [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and
1019
+ [Kleister-NDA](https://github.com/applicaai/kleister-nda).
1020
+ """,
1021
+ LAYOUTLMV3_START_DOCSTRING,
1022
+ )
1023
+ class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel):
1024
+ def __init__(self, config):
1025
+ super().__init__(config)
1026
+ self.num_labels = config.num_labels
1027
+
1028
+ self.layoutlmv3 = LayoutLMv3Model(config)
1029
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1030
+ if config.num_labels < 10:
1031
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1032
+ else:
1033
+ self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
1034
+
1035
+ self.init_weights()
1036
+
1037
+ @add_start_docstrings_to_model_forward(
1038
+ LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
1039
+ )
1040
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1041
+ def forward(
1042
+ self,
1043
+ input_ids: Optional[torch.LongTensor] = None,
1044
+ bbox: Optional[torch.LongTensor] = None,
1045
+ attention_mask: Optional[torch.FloatTensor] = None,
1046
+ token_type_ids: Optional[torch.LongTensor] = None,
1047
+ position_ids: Optional[torch.LongTensor] = None,
1048
+ head_mask: Optional[torch.FloatTensor] = None,
1049
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1050
+ labels: Optional[torch.LongTensor] = None,
1051
+ output_attentions: Optional[bool] = None,
1052
+ output_hidden_states: Optional[bool] = None,
1053
+ return_dict: Optional[bool] = None,
1054
+ pixel_values: Optional[torch.LongTensor] = None,
1055
+ ) -> Union[Tuple, TokenClassifierOutput]:
1056
+ r"""
1057
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1058
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1059
+
1060
+ Returns:
1061
+
1062
+ Examples:
1063
+
1064
+ ```python
1065
+ >>> from transformers import AutoProcessor, AutoModelForTokenClassification
1066
+ >>> from datasets import load_dataset
1067
+
1068
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1069
+ >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
1070
+
1071
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1072
+ >>> example = dataset[0]
1073
+ >>> image = example["image"]
1074
+ >>> words = example["tokens"]
1075
+ >>> boxes = example["bboxes"]
1076
+ >>> word_labels = example["ner_tags"]
1077
+
1078
+ >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
1079
+
1080
+ >>> outputs = model(**encoding)
1081
+ >>> loss = outputs.loss
1082
+ >>> logits = outputs.logits
1083
+ ```"""
1084
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1085
+
1086
+ outputs = self.layoutlmv3(
1087
+ input_ids,
1088
+ bbox=bbox,
1089
+ attention_mask=attention_mask,
1090
+ token_type_ids=token_type_ids,
1091
+ position_ids=position_ids,
1092
+ head_mask=head_mask,
1093
+ inputs_embeds=inputs_embeds,
1094
+ output_attentions=output_attentions,
1095
+ output_hidden_states=output_hidden_states,
1096
+ return_dict=return_dict,
1097
+ pixel_values=pixel_values,
1098
+ )
1099
+ if input_ids is not None:
1100
+ input_shape = input_ids.size()
1101
+ else:
1102
+ input_shape = inputs_embeds.size()[:-1]
1103
+
1104
+ seq_length = input_shape[1]
1105
+ # only take the text part of the output representations
1106
+ sequence_output = outputs[0][:, :seq_length]
1107
+ sequence_output = self.dropout(sequence_output)
1108
+ logits = self.classifier(sequence_output)
1109
+
1110
+ loss = None
1111
+ if labels is not None:
1112
+ loss_fct = CrossEntropyLoss()
1113
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1114
+
1115
+ if not return_dict:
1116
+ output = (logits,) + outputs[1:]
1117
+ return ((loss,) + output) if loss is not None else output
1118
+
1119
+ return TokenClassifierOutput(
1120
+ loss=loss,
1121
+ logits=logits,
1122
+ hidden_states=outputs.hidden_states,
1123
+ attentions=outputs.attentions,
1124
+ )
1125
+
1126
+
1127
+ @add_start_docstrings(
1128
+ """
1129
+ LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as
1130
+ [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
1131
+ compute `span start logits` and `span end logits`).
1132
+ """,
1133
+ LAYOUTLMV3_START_DOCSTRING,
1134
+ )
1135
+ class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel):
1136
+ def __init__(self, config):
1137
+ super().__init__(config)
1138
+ self.num_labels = config.num_labels
1139
+
1140
+ self.layoutlmv3 = LayoutLMv3Model(config)
1141
+ self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False)
1142
+
1143
+ self.init_weights()
1144
+
1145
+ @add_start_docstrings_to_model_forward(
1146
+ LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
1147
+ )
1148
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1149
+ def forward(
1150
+ self,
1151
+ input_ids: Optional[torch.LongTensor] = None,
1152
+ attention_mask: Optional[torch.FloatTensor] = None,
1153
+ token_type_ids: Optional[torch.LongTensor] = None,
1154
+ position_ids: Optional[torch.LongTensor] = None,
1155
+ head_mask: Optional[torch.FloatTensor] = None,
1156
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1157
+ start_positions: Optional[torch.LongTensor] = None,
1158
+ end_positions: Optional[torch.LongTensor] = None,
1159
+ output_attentions: Optional[bool] = None,
1160
+ output_hidden_states: Optional[bool] = None,
1161
+ return_dict: Optional[bool] = None,
1162
+ bbox: Optional[torch.LongTensor] = None,
1163
+ pixel_values: Optional[torch.LongTensor] = None,
1164
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1165
+ r"""
1166
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1167
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1168
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1169
+ are not taken into account for computing the loss.
1170
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1171
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1172
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1173
+ are not taken into account for computing the loss.
1174
+
1175
+ Returns:
1176
+
1177
+ Examples:
1178
+
1179
+ ```python
1180
+ >>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
1181
+ >>> from datasets import load_dataset
1182
+ >>> import torch
1183
+
1184
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1185
+ >>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
1186
+
1187
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1188
+ >>> example = dataset[0]
1189
+ >>> image = example["image"]
1190
+ >>> question = "what's his name?"
1191
+ >>> words = example["tokens"]
1192
+ >>> boxes = example["bboxes"]
1193
+
1194
+ >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
1195
+ >>> start_positions = torch.tensor([1])
1196
+ >>> end_positions = torch.tensor([3])
1197
+
1198
+ >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
1199
+ >>> loss = outputs.loss
1200
+ >>> start_scores = outputs.start_logits
1201
+ >>> end_scores = outputs.end_logits
1202
+ ```"""
1203
+
1204
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1205
+
1206
+ outputs = self.layoutlmv3(
1207
+ input_ids,
1208
+ attention_mask=attention_mask,
1209
+ token_type_ids=token_type_ids,
1210
+ position_ids=position_ids,
1211
+ head_mask=head_mask,
1212
+ inputs_embeds=inputs_embeds,
1213
+ output_attentions=output_attentions,
1214
+ output_hidden_states=output_hidden_states,
1215
+ return_dict=return_dict,
1216
+ bbox=bbox,
1217
+ pixel_values=pixel_values,
1218
+ )
1219
+
1220
+ sequence_output = outputs[0]
1221
+
1222
+ logits = self.qa_outputs(sequence_output)
1223
+ start_logits, end_logits = logits.split(1, dim=-1)
1224
+ start_logits = start_logits.squeeze(-1).contiguous()
1225
+ end_logits = end_logits.squeeze(-1).contiguous()
1226
+
1227
+ total_loss = None
1228
+ if start_positions is not None and end_positions is not None:
1229
+ # If we are on multi-GPU, split add a dimension
1230
+ if len(start_positions.size()) > 1:
1231
+ start_positions = start_positions.squeeze(-1)
1232
+ if len(end_positions.size()) > 1:
1233
+ end_positions = end_positions.squeeze(-1)
1234
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1235
+ ignored_index = start_logits.size(1)
1236
+ start_positions = start_positions.clamp(0, ignored_index)
1237
+ end_positions = end_positions.clamp(0, ignored_index)
1238
+
1239
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1240
+ start_loss = loss_fct(start_logits, start_positions)
1241
+ end_loss = loss_fct(end_logits, end_positions)
1242
+ total_loss = (start_loss + end_loss) / 2
1243
+
1244
+ if not return_dict:
1245
+ output = (start_logits, end_logits) + outputs[1:]
1246
+ return ((total_loss,) + output) if total_loss is not None else output
1247
+
1248
+ return QuestionAnsweringModelOutput(
1249
+ loss=total_loss,
1250
+ start_logits=start_logits,
1251
+ end_logits=end_logits,
1252
+ hidden_states=outputs.hidden_states,
1253
+ attentions=outputs.attentions,
1254
+ )
1255
+
1256
+
1257
+ @add_start_docstrings(
1258
+ """
1259
+ LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
1260
+ [CLS] token) e.g. for document image classification tasks such as the
1261
+ [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
1262
+ """,
1263
+ LAYOUTLMV3_START_DOCSTRING,
1264
+ )
1265
+ class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel):
1266
+ def __init__(self, config):
1267
+ super().__init__(config)
1268
+ self.num_labels = config.num_labels
1269
+ self.config = config
1270
+ self.layoutlmv3 = LayoutLMv3Model(config)
1271
+ self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
1272
+
1273
+ self.init_weights()
1274
+
1275
+ @add_start_docstrings_to_model_forward(
1276
+ LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
1277
+ )
1278
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1279
+ def forward(
1280
+ self,
1281
+ input_ids: Optional[torch.LongTensor] = None,
1282
+ attention_mask: Optional[torch.FloatTensor] = None,
1283
+ token_type_ids: Optional[torch.LongTensor] = None,
1284
+ position_ids: Optional[torch.LongTensor] = None,
1285
+ head_mask: Optional[torch.FloatTensor] = None,
1286
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1287
+ labels: Optional[torch.LongTensor] = None,
1288
+ output_attentions: Optional[bool] = None,
1289
+ output_hidden_states: Optional[bool] = None,
1290
+ return_dict: Optional[bool] = None,
1291
+ bbox: Optional[torch.LongTensor] = None,
1292
+ pixel_values: Optional[torch.LongTensor] = None,
1293
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1294
+ """
1295
+ Returns:
1296
+
1297
+ Examples:
1298
+
1299
+ ```python
1300
+ >>> from transformers import AutoProcessor, AutoModelForSequenceClassification
1301
+ >>> from datasets import load_dataset
1302
+ >>> import torch
1303
+
1304
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1305
+ >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
1306
+
1307
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1308
+ >>> example = dataset[0]
1309
+ >>> image = example["image"]
1310
+ >>> words = example["tokens"]
1311
+ >>> boxes = example["bboxes"]
1312
+
1313
+ >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
1314
+ >>> sequence_label = torch.tensor([1])
1315
+
1316
+ >>> outputs = model(**encoding, labels=sequence_label)
1317
+ >>> loss = outputs.loss
1318
+ >>> logits = outputs.logits
1319
+ ```"""
1320
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1321
+
1322
+ outputs = self.layoutlmv3(
1323
+ input_ids,
1324
+ attention_mask=attention_mask,
1325
+ token_type_ids=token_type_ids,
1326
+ position_ids=position_ids,
1327
+ head_mask=head_mask,
1328
+ inputs_embeds=inputs_embeds,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ return_dict=return_dict,
1332
+ bbox=bbox,
1333
+ pixel_values=pixel_values,
1334
+ )
1335
+
1336
+ sequence_output = outputs[0][:, 0, :]
1337
+ logits = self.classifier(sequence_output)
1338
+
1339
+ loss = None
1340
+ if labels is not None:
1341
+ if self.config.problem_type is None:
1342
+ if self.num_labels == 1:
1343
+ self.config.problem_type = "regression"
1344
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1345
+ self.config.problem_type = "single_label_classification"
1346
+ else:
1347
+ self.config.problem_type = "multi_label_classification"
1348
+
1349
+ if self.config.problem_type == "regression":
1350
+ loss_fct = MSELoss()
1351
+ if self.num_labels == 1:
1352
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1353
+ else:
1354
+ loss = loss_fct(logits, labels)
1355
+ elif self.config.problem_type == "single_label_classification":
1356
+ loss_fct = CrossEntropyLoss()
1357
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1358
+ elif self.config.problem_type == "multi_label_classification":
1359
+ loss_fct = BCEWithLogitsLoss()
1360
+ loss = loss_fct(logits, labels)
1361
+
1362
+ if not return_dict:
1363
+ output = (logits,) + outputs[1:]
1364
+ return ((loss,) + output) if loss is not None else output
1365
+
1366
+ return SequenceClassifierOutput(
1367
+ loss=loss,
1368
+ logits=logits,
1369
+ hidden_states=outputs.hidden_states,
1370
+ attentions=outputs.attentions,
1371
+ )
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py ADDED
@@ -0,0 +1,1778 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TF 2.0 LayoutLMv3 model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFQuestionAnsweringModelOutput,
30
+ TFSequenceClassifierOutput,
31
+ TFTokenClassifierOutput,
32
+ )
33
+ from ...modeling_tf_utils import (
34
+ TFPreTrainedModel,
35
+ TFQuestionAnsweringLoss,
36
+ TFSequenceClassificationLoss,
37
+ TFTokenClassificationLoss,
38
+ get_initializer,
39
+ keras,
40
+ keras_serializable,
41
+ unpack_inputs,
42
+ )
43
+ from ...tf_utils import check_embeddings_within_bounds
44
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
45
+ from .configuration_layoutlmv3 import LayoutLMv3Config
46
+
47
+
48
+ _CONFIG_FOR_DOC = "LayoutLMv3Config"
49
+
50
+ _DUMMY_INPUT_IDS = [
51
+ [7, 6, 1],
52
+ [1, 2, 0],
53
+ ]
54
+
55
+ _DUMMY_BBOX = [
56
+ [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
57
+ [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
58
+ ]
59
+
60
+
61
+ from ..deprecated._archive_maps import TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
62
+
63
+
64
+ LARGE_NEGATIVE = -1e8
65
+
66
+
67
+ class TFLayoutLMv3PatchEmbeddings(keras.layers.Layer):
68
+ """LayoutLMv3 image (patch) embeddings."""
69
+
70
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
71
+ super().__init__(**kwargs)
72
+ patch_sizes = (
73
+ config.patch_size
74
+ if isinstance(config.patch_size, collections.abc.Iterable)
75
+ else (config.patch_size, config.patch_size)
76
+ )
77
+ self.proj = keras.layers.Conv2D(
78
+ filters=config.hidden_size,
79
+ kernel_size=patch_sizes,
80
+ strides=patch_sizes,
81
+ padding="valid",
82
+ data_format="channels_last",
83
+ use_bias=True,
84
+ kernel_initializer=get_initializer(config.initializer_range),
85
+ name="proj",
86
+ )
87
+ self.hidden_size = config.hidden_size
88
+ self.num_patches = (config.input_size**2) // (patch_sizes[0] * patch_sizes[1])
89
+ self.config = config
90
+
91
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
92
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
93
+ # So change the input format from `NCHW` to `NHWC`.
94
+ pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1])
95
+
96
+ embeddings = self.proj(pixel_values)
97
+ embeddings = tf.reshape(embeddings, (-1, self.num_patches, self.hidden_size))
98
+ return embeddings
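+ # Illustrative note: with input_size=224 and patch_size=16, a (batch_size, num_channels, 224, 224) image is
+ # projected to (batch_size, 196, hidden_size) patch embeddings, since 224**2 // (16 * 16) = 196 patches.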
99
+
100
+ def build(self, input_shape=None):
101
+ if self.built:
102
+ return
103
+ self.built = True
104
+ if getattr(self, "proj", None) is not None:
105
+ with tf.name_scope(self.proj.name):
106
+ self.proj.build([None, None, None, self.config.num_channels])
107
+
108
+
109
+ class TFLayoutLMv3TextEmbeddings(keras.layers.Layer):
110
+ """
111
+ LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.
112
+ """
113
+
114
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
115
+ super().__init__(**kwargs)
116
+ self.word_embeddings = keras.layers.Embedding(
117
+ config.vocab_size,
118
+ config.hidden_size,
119
+ embeddings_initializer=get_initializer(config.initializer_range),
120
+ name="word_embeddings",
121
+ )
122
+ self.token_type_embeddings = keras.layers.Embedding(
123
+ config.type_vocab_size,
124
+ config.hidden_size,
125
+ embeddings_initializer=get_initializer(config.initializer_range),
126
+ name="token_type_embeddings",
127
+ )
128
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
129
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
130
+ self.padding_token_index = config.pad_token_id
131
+ self.position_embeddings = keras.layers.Embedding(
132
+ config.max_position_embeddings,
133
+ config.hidden_size,
134
+ embeddings_initializer=get_initializer(config.initializer_range),
135
+ name="position_embeddings",
136
+ )
137
+ self.x_position_embeddings = keras.layers.Embedding(
138
+ config.max_2d_position_embeddings,
139
+ config.coordinate_size,
140
+ embeddings_initializer=get_initializer(config.initializer_range),
141
+ name="x_position_embeddings",
142
+ )
143
+ self.y_position_embeddings = keras.layers.Embedding(
144
+ config.max_2d_position_embeddings,
145
+ config.coordinate_size,
146
+ embeddings_initializer=get_initializer(config.initializer_range),
147
+ name="y_position_embeddings",
148
+ )
149
+ self.h_position_embeddings = keras.layers.Embedding(
150
+ config.max_2d_position_embeddings,
151
+ config.shape_size,
152
+ embeddings_initializer=get_initializer(config.initializer_range),
153
+ name="h_position_embeddings",
154
+ )
155
+ self.w_position_embeddings = keras.layers.Embedding(
156
+ config.max_2d_position_embeddings,
157
+ config.shape_size,
158
+ embeddings_initializer=get_initializer(config.initializer_range),
159
+ name="w_position_embeddings",
160
+ )
161
+ self.max_2d_positions = config.max_2d_position_embeddings
162
+ self.config = config
163
+
164
+ def calculate_spatial_position_embeddings(self, bbox: tf.Tensor) -> tf.Tensor:
165
+ try:
166
+ left_position_ids = bbox[:, :, 0]
167
+ upper_position_ids = bbox[:, :, 1]
168
+ right_position_ids = bbox[:, :, 2]
169
+ lower_position_ids = bbox[:, :, 3]
170
+ except IndexError as exception:
171
+ raise IndexError("Bounding box is not of shape (batch_size, seq_length, 4).") from exception
172
+
173
+ try:
174
+ left_position_embeddings = self.x_position_embeddings(left_position_ids)
175
+ upper_position_embeddings = self.y_position_embeddings(upper_position_ids)
176
+ right_position_embeddings = self.x_position_embeddings(right_position_ids)
177
+ lower_position_embeddings = self.y_position_embeddings(lower_position_ids)
178
+ except IndexError as exception:
179
+ raise IndexError(
180
+ f"The `bbox` coordinate values should be within 0-{self.max_2d_positions} range."
181
+ ) from exception
182
+
183
+ max_position_id = self.max_2d_positions - 1
184
+ h_position_embeddings = self.h_position_embeddings(
185
+ tf.clip_by_value(bbox[:, :, 3] - bbox[:, :, 1], 0, max_position_id)
186
+ )
187
+ w_position_embeddings = self.w_position_embeddings(
188
+ tf.clip_by_value(bbox[:, :, 2] - bbox[:, :, 0], 0, max_position_id)
189
+ )
190
+
191
+ # LayoutLMv1 sums the spatial embeddings, but LayoutLMv3 concatenates them.
192
+ spatial_position_embeddings = tf.concat(
193
+ [
194
+ left_position_embeddings,
195
+ upper_position_embeddings,
196
+ right_position_embeddings,
197
+ lower_position_embeddings,
198
+ h_position_embeddings,
199
+ w_position_embeddings,
200
+ ],
201
+ axis=-1,
202
+ )
203
+ return spatial_position_embeddings
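+ # Illustrative note: the concatenation has size 4 * coordinate_size + 2 * shape_size, which the configuration is
+ # expected to make equal to hidden_size (e.g. 4 * 128 + 2 * 128 = 768 for the base model) so that it can be added
+ # to the text embeddings in `call`.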
204
+
205
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds: tf.Tensor) -> tf.Tensor:
206
+ """
207
+ We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position
208
+ ids.
209
+ """
210
+ input_shape = tf.shape(inputs_embeds)
211
+ sequence_length = input_shape[1]
212
+ start_index = self.padding_token_index + 1
213
+ end_index = self.padding_token_index + sequence_length + 1
214
+ position_ids = tf.range(start_index, end_index, dtype=tf.int32)
215
+ batch_size = input_shape[0]
216
+ position_ids = tf.reshape(position_ids, (1, sequence_length))
217
+ position_ids = tf.tile(position_ids, (batch_size, 1))
218
+ return position_ids
219
+
220
+ def create_position_ids_from_input_ids(self, input_ids: tf.Tensor) -> tf.Tensor:
221
+ """
222
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_token_index + 1.
223
+ """
224
+ mask = tf.cast(tf.not_equal(input_ids, self.padding_token_index), input_ids.dtype)
225
+ position_ids = tf.cumsum(mask, axis=1) * mask
226
+ position_ids = position_ids + self.padding_token_index
227
+ return position_ids
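+ # Illustrative example: with padding_token_index=1 and input_ids=[[5, 7, 9, 1, 1]], the mask is [1, 1, 1, 0, 0],
+ # the cumulative sum gives [1, 2, 3, 3, 3], and masking plus offsetting yields position_ids=[[2, 3, 4, 1, 1]],
+ # so padding positions keep the padding index itself.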
228
+
229
+ def create_position_ids(self, input_ids: tf.Tensor, inputs_embeds: tf.Tensor) -> tf.Tensor:
230
+ if input_ids is None:
231
+ return self.create_position_ids_from_inputs_embeds(inputs_embeds)
232
+ else:
233
+ return self.create_position_ids_from_input_ids(input_ids)
234
+
235
+ def call(
236
+ self,
237
+ input_ids: tf.Tensor | None = None,
238
+ bbox: tf.Tensor = None,
239
+ token_type_ids: tf.Tensor | None = None,
240
+ position_ids: tf.Tensor | None = None,
241
+ inputs_embeds: tf.Tensor | None = None,
242
+ training: bool = False,
243
+ ) -> tf.Tensor:
244
+ if position_ids is None:
245
+ position_ids = self.create_position_ids(input_ids, inputs_embeds)
246
+
247
+ if input_ids is not None:
248
+ input_shape = tf.shape(input_ids)
249
+ else:
250
+ input_shape = tf.shape(inputs_embeds)[:-1]
251
+
252
+ if token_type_ids is None:
253
+ token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype)
254
+
255
+ if inputs_embeds is None:
256
+ check_embeddings_within_bounds(input_ids, self.word_embeddings.input_dim)
257
+ inputs_embeds = self.word_embeddings(input_ids)
258
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
259
+
260
+ embeddings = inputs_embeds + token_type_embeddings
261
+ position_embeddings = self.position_embeddings(position_ids)
262
+ embeddings += position_embeddings
263
+
264
+ spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox)
265
+
266
+ embeddings += spatial_position_embeddings
267
+
268
+ embeddings = self.LayerNorm(embeddings)
269
+ embeddings = self.dropout(embeddings, training=training)
270
+ return embeddings
271
+
272
+ def build(self, input_shape=None):
273
+ if self.built:
274
+ return
275
+ self.built = True
276
+ if getattr(self, "word_embeddings", None) is not None:
277
+ with tf.name_scope(self.word_embeddings.name):
278
+ self.word_embeddings.build(None)
279
+ if getattr(self, "token_type_embeddings", None) is not None:
280
+ with tf.name_scope(self.token_type_embeddings.name):
281
+ self.token_type_embeddings.build(None)
282
+ if getattr(self, "LayerNorm", None) is not None:
283
+ with tf.name_scope(self.LayerNorm.name):
284
+ self.LayerNorm.build([None, None, self.config.hidden_size])
285
+ if getattr(self, "position_embeddings", None) is not None:
286
+ with tf.name_scope(self.position_embeddings.name):
287
+ self.position_embeddings.build(None)
288
+ if getattr(self, "x_position_embeddings", None) is not None:
289
+ with tf.name_scope(self.x_position_embeddings.name):
290
+ self.x_position_embeddings.build(None)
291
+ if getattr(self, "y_position_embeddings", None) is not None:
292
+ with tf.name_scope(self.y_position_embeddings.name):
293
+ self.y_position_embeddings.build(None)
294
+ if getattr(self, "h_position_embeddings", None) is not None:
295
+ with tf.name_scope(self.h_position_embeddings.name):
296
+ self.h_position_embeddings.build(None)
297
+ if getattr(self, "w_position_embeddings", None) is not None:
298
+ with tf.name_scope(self.w_position_embeddings.name):
299
+ self.w_position_embeddings.build(None)
300
+
301
+
302
+ class TFLayoutLMv3SelfAttention(keras.layers.Layer):
303
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
304
+ super().__init__(**kwargs)
305
+ if config.hidden_size % config.num_attention_heads != 0:
306
+ raise ValueError(
307
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
308
+ f"heads ({config.num_attention_heads})"
309
+ )
310
+
311
+ self.num_attention_heads = config.num_attention_heads
312
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
313
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
314
+ self.attention_score_normaliser = math.sqrt(self.attention_head_size)
315
+
316
+ self.query = keras.layers.Dense(
317
+ self.all_head_size,
318
+ kernel_initializer=get_initializer(config.initializer_range),
319
+ name="query",
320
+ )
321
+ self.key = keras.layers.Dense(
322
+ self.all_head_size,
323
+ kernel_initializer=get_initializer(config.initializer_range),
324
+ name="key",
325
+ )
326
+ self.value = keras.layers.Dense(
327
+ self.all_head_size,
328
+ kernel_initializer=get_initializer(config.initializer_range),
329
+ name="value",
330
+ )
331
+
332
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
333
+ self.has_relative_attention_bias = config.has_relative_attention_bias
334
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
335
+ self.config = config
336
+
337
+ def transpose_for_scores(self, x: tf.Tensor):
338
+ shape = tf.shape(x)
339
+ new_shape = (
340
+ shape[0], # batch_size
341
+ shape[1], # seq_length
342
+ self.num_attention_heads,
343
+ self.attention_head_size,
344
+ )
345
+ x = tf.reshape(x, new_shape)
346
+ return tf.transpose(x, perm=[0, 2, 1, 3]) # batch_size, num_heads, seq_length, attention_head_size
347
+
348
+ def cogview_attention(self, attention_scores: tf.Tensor, alpha: Union[float, int] = 32):
349
+ """
350
+ https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
351
+ (PB-Relax). A replacement for the original keras.layers.Softmax(axis=-1)(attention_scores). The new
352
+ attention_probs may be slightly slower and introduce a small numerical bias. You can use
353
+ tf.debugging.assert_near(standard_attention_probs, cogview_attention_probs, atol=1e-08) to compare the two;
354
+ the smaller the atol (e.g., 1e-08), the closer the agreement.
355
+ """
356
+ scaled_attention_scores = attention_scores / alpha
357
+ max_value = tf.expand_dims(tf.reduce_max(scaled_attention_scores, axis=-1), axis=-1)
358
+ new_attention_scores = (scaled_attention_scores - max_value) * alpha
359
+ return tf.math.softmax(new_attention_scores, axis=-1)
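+ # Illustrative note: since alpha * max(attention_scores / alpha) = max(attention_scores), the rescaled scores
+ # equal attention_scores - max(attention_scores), so this is mathematically a max-subtracted (stabilized) softmax;
+ # dividing by alpha before the reduction keeps intermediate values small enough for reduced-precision training.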
360
+
361
+ def call(
362
+ self,
363
+ hidden_states: tf.Tensor,
364
+ attention_mask: tf.Tensor | None,
365
+ head_mask: tf.Tensor | None,
366
+ output_attentions: bool,
367
+ rel_pos: tf.Tensor | None = None,
368
+ rel_2d_pos: tf.Tensor | None = None,
369
+ training: bool = False,
370
+ ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
371
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
372
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
373
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
374
+
375
+ # Take the dot product between "query" and "key" to get the raw attention scores.
376
+ normalised_query_layer = query_layer / self.attention_score_normaliser
377
+ transposed_key_layer = tf.transpose(
378
+ key_layer, perm=[0, 1, 3, 2]
379
+ ) # batch_size, num_heads, attention_head_size, seq_length
380
+ attention_scores = tf.matmul(normalised_query_layer, transposed_key_layer)
381
+
382
+ if self.has_relative_attention_bias and self.has_spatial_attention_bias:
383
+ attention_scores += (rel_pos + rel_2d_pos) / self.attention_score_normaliser
384
+ elif self.has_relative_attention_bias:
385
+ attention_scores += rel_pos / self.attention_score_normaliser
386
+
387
+ if attention_mask is not None:
388
+ # Apply the attention mask (is precomputed for all layers in TFLayoutLMv3Model call() function)
389
+ attention_scores += attention_mask
390
+
391
+ # Normalize the attention scores to probabilities.
392
+ # Use the trick of CogView paper to stabilize training.
393
+ attention_probs = self.cogview_attention(attention_scores)
394
+
395
+ attention_probs = self.dropout(attention_probs, training=training)
396
+
397
+ # Mask heads if we want to.
398
+ if head_mask is not None:
399
+ attention_probs = attention_probs * head_mask
400
+
401
+ context_layer = tf.matmul(attention_probs, value_layer)
402
+ context_layer = tf.transpose(
403
+ context_layer, perm=[0, 2, 1, 3]
404
+ ) # batch_size, seq_length, num_heads, attention_head_size
405
+ shape = tf.shape(context_layer)
406
+ context_layer = tf.reshape(
407
+ context_layer, (shape[0], shape[1], self.all_head_size)
408
+ ) # batch_size, seq_length, num_heads * attention_head_size
409
+
410
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
411
+
412
+ return outputs
413
+
414
+ def build(self, input_shape=None):
415
+ if self.built:
416
+ return
417
+ self.built = True
418
+ if getattr(self, "query", None) is not None:
419
+ with tf.name_scope(self.query.name):
420
+ self.query.build([None, None, self.config.hidden_size])
421
+ if getattr(self, "key", None) is not None:
422
+ with tf.name_scope(self.key.name):
423
+ self.key.build([None, None, self.config.hidden_size])
424
+ if getattr(self, "value", None) is not None:
425
+ with tf.name_scope(self.value.name):
426
+ self.value.build([None, None, self.config.hidden_size])
427
+
428
+
429
+ # Copied from models.roberta.modeling_tf_roberta.TFRobertaSelfOutput
430
+ class TFLayoutLMv3SelfOutput(keras.layers.Layer):
431
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
432
+ super().__init__(**kwargs)
433
+
434
+ self.dense = keras.layers.Dense(
435
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
436
+ )
437
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
438
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
439
+ self.config = config
440
+
441
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
442
+ hidden_states = self.dense(inputs=hidden_states)
443
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
444
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
445
+
446
+ return hidden_states
447
+
448
+ def build(self, input_shape=None):
449
+ if self.built:
450
+ return
451
+ self.built = True
452
+ if getattr(self, "dense", None) is not None:
453
+ with tf.name_scope(self.dense.name):
454
+ self.dense.build([None, None, self.config.hidden_size])
455
+ if getattr(self, "LayerNorm", None) is not None:
456
+ with tf.name_scope(self.LayerNorm.name):
457
+ self.LayerNorm.build([None, None, self.config.hidden_size])
458
+
459
+
460
+ class TFLayoutLMv3Attention(keras.layers.Layer):
461
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
462
+ super().__init__(**kwargs)
463
+ self.self_attention = TFLayoutLMv3SelfAttention(config, name="self")
464
+ self.self_output = TFLayoutLMv3SelfOutput(config, name="output")
465
+
466
+ def call(
467
+ self,
468
+ hidden_states: tf.Tensor,
469
+ attention_mask: tf.Tensor | None,
470
+ head_mask: tf.Tensor | None,
471
+ output_attentions: bool,
472
+ rel_pos: tf.Tensor | None = None,
473
+ rel_2d_pos: tf.Tensor | None = None,
474
+ training: bool = False,
475
+ ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
476
+ self_outputs = self.self_attention(
477
+ hidden_states,
478
+ attention_mask,
479
+ head_mask,
480
+ output_attentions,
481
+ rel_pos,
482
+ rel_2d_pos,
483
+ training=training,
484
+ )
485
+ attention_output = self.self_output(self_outputs[0], hidden_states, training=training)
486
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
487
+ return outputs
488
+
489
+ def build(self, input_shape=None):
490
+ if self.built:
491
+ return
492
+ self.built = True
493
+ if getattr(self, "self_attention", None) is not None:
494
+ with tf.name_scope(self.self_attention.name):
495
+ self.self_attention.build(None)
496
+ if getattr(self, "self_output", None) is not None:
497
+ with tf.name_scope(self.self_output.name):
498
+ self.self_output.build(None)
499
+
500
+
501
+ # Copied from models.roberta.modeling_tf_roberta.TFRobertaIntermediate
502
+ class TFLayoutLMv3Intermediate(keras.layers.Layer):
503
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
504
+ super().__init__(**kwargs)
505
+
506
+ self.dense = keras.layers.Dense(
507
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
508
+ )
509
+
510
+ if isinstance(config.hidden_act, str):
511
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
512
+ else:
513
+ self.intermediate_act_fn = config.hidden_act
514
+ self.config = config
515
+
516
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
517
+ hidden_states = self.dense(inputs=hidden_states)
518
+ hidden_states = self.intermediate_act_fn(hidden_states)
519
+
520
+ return hidden_states
521
+
522
+ def build(self, input_shape=None):
523
+ if self.built:
524
+ return
525
+ self.built = True
526
+ if getattr(self, "dense", None) is not None:
527
+ with tf.name_scope(self.dense.name):
528
+ self.dense.build([None, None, self.config.hidden_size])
529
+
530
+
531
+ # Copied from models.roberta.modeling_tf_roberta.TFRobertaOutput
532
+ class TFLayoutLMv3Output(keras.layers.Layer):
533
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
534
+ super().__init__(**kwargs)
535
+
536
+ self.dense = keras.layers.Dense(
537
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
538
+ )
539
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
540
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
541
+ self.config = config
542
+
543
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
544
+ hidden_states = self.dense(inputs=hidden_states)
545
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
546
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
547
+
548
+ return hidden_states
549
+
550
+ def build(self, input_shape=None):
551
+ if self.built:
552
+ return
553
+ self.built = True
554
+ if getattr(self, "dense", None) is not None:
555
+ with tf.name_scope(self.dense.name):
556
+ self.dense.build([None, None, self.config.intermediate_size])
557
+ if getattr(self, "LayerNorm", None) is not None:
558
+ with tf.name_scope(self.LayerNorm.name):
559
+ self.LayerNorm.build([None, None, self.config.hidden_size])
560
+
561
+
562
+ class TFLayoutLMv3Layer(keras.layers.Layer):
563
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
564
+ super().__init__(**kwargs)
565
+ self.attention = TFLayoutLMv3Attention(config, name="attention")
566
+ self.intermediate = TFLayoutLMv3Intermediate(config, name="intermediate")
567
+ self.bert_output = TFLayoutLMv3Output(config, name="output")
568
+
569
+ def call(
570
+ self,
571
+ hidden_states: tf.Tensor,
572
+ attention_mask: tf.Tensor | None,
573
+ head_mask: tf.Tensor | None,
574
+ output_attentions: bool,
575
+ rel_pos: tf.Tensor | None = None,
576
+ rel_2d_pos: tf.Tensor | None = None,
577
+ training: bool = False,
578
+ ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
579
+ self_attention_outputs = self.attention(
580
+ hidden_states,
581
+ attention_mask,
582
+ head_mask,
583
+ output_attentions=output_attentions,
584
+ rel_pos=rel_pos,
585
+ rel_2d_pos=rel_2d_pos,
586
+ training=training,
587
+ )
588
+ attention_output = self_attention_outputs[0]
589
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
590
+ intermediate_output = self.intermediate(attention_output)
591
+ layer_output = self.bert_output(intermediate_output, attention_output, training=training)
592
+ outputs = (layer_output,) + outputs
593
+ return outputs
594
+
595
+ def build(self, input_shape=None):
596
+ if self.built:
597
+ return
598
+ self.built = True
599
+ if getattr(self, "attention", None) is not None:
600
+ with tf.name_scope(self.attention.name):
601
+ self.attention.build(None)
602
+ if getattr(self, "intermediate", None) is not None:
603
+ with tf.name_scope(self.intermediate.name):
604
+ self.intermediate.build(None)
605
+ if getattr(self, "bert_output", None) is not None:
606
+ with tf.name_scope(self.bert_output.name):
607
+ self.bert_output.build(None)
608
+
609
+
610
+ class TFLayoutLMv3Encoder(keras.layers.Layer):
611
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
612
+ super().__init__(**kwargs)
613
+ self.config = config
614
+ self.layer = [TFLayoutLMv3Layer(config, name=f"layer.{i}") for i in range(config.num_hidden_layers)]
615
+
616
+ self.has_relative_attention_bias = config.has_relative_attention_bias
617
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
618
+
619
+ if self.has_relative_attention_bias:
620
+ self.rel_pos_bins = config.rel_pos_bins
621
+ self.max_rel_pos = config.max_rel_pos
622
+ self.rel_pos_bias = keras.layers.Dense(
623
+ units=config.num_attention_heads,
624
+ kernel_initializer=get_initializer(config.initializer_range),
625
+ use_bias=False,
626
+ name="rel_pos_bias",
627
+ )
628
+
629
+ if self.has_spatial_attention_bias:
630
+ self.max_rel_2d_pos = config.max_rel_2d_pos
631
+ self.rel_2d_pos_bins = config.rel_2d_pos_bins
632
+ self.rel_pos_x_bias = keras.layers.Dense(
633
+ units=config.num_attention_heads,
634
+ kernel_initializer=get_initializer(config.initializer_range),
635
+ use_bias=False,
636
+ name="rel_pos_x_bias",
637
+ )
638
+ self.rel_pos_y_bias = keras.layers.Dense(
639
+ units=config.num_attention_heads,
640
+ kernel_initializer=get_initializer(config.initializer_range),
641
+ use_bias=False,
642
+ name="rel_pos_y_bias",
643
+ )
644
+
645
+ def relative_position_bucket(self, relative_positions: tf.Tensor, num_buckets: int, max_distance: int):
646
+ # the negative relative positions are assigned to the interval [0, num_buckets / 2]
647
+ # we deal with this by assigning absolute relative positions to the interval [0, num_buckets / 2]
648
+ # and then offsetting the positive relative positions by num_buckets / 2 at the end
649
+ num_buckets = num_buckets // 2
650
+ buckets = tf.abs(relative_positions)
651
+
652
+ # half of the buckets are for exact increments in positions
653
+ max_exact_buckets = num_buckets // 2
654
+ is_small = buckets < max_exact_buckets
655
+
656
+ # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance
657
+ buckets_log_ratio = tf.math.log(tf.cast(buckets, tf.float32) / max_exact_buckets)
658
+ distance_log_ratio = math.log(max_distance / max_exact_buckets)
659
+ buckets_big_offset = (
660
+ buckets_log_ratio / distance_log_ratio * (num_buckets - max_exact_buckets)
661
+ ) # scale is [0, num_buckets - max_exact_buckets]
662
+ buckets_big = max_exact_buckets + buckets_big_offset # scale is [max_exact_buckets, num_buckets]
663
+ buckets_big = tf.cast(buckets_big, buckets.dtype)
664
+ buckets_big = tf.minimum(buckets_big, num_buckets - 1)
665
+
666
+ return (tf.cast(relative_positions > 0, buckets.dtype) * num_buckets) + tf.where(
667
+ is_small, buckets, buckets_big
668
+ )
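+ # Illustrative example: with num_buckets=32 and max_distance=128, half of the 16 buckets per sign cover exact
+ # offsets below 8 (e.g. a relative position of +3 maps to bucket 16 + 3 = 19 and -3 maps to bucket 3), while
+ # larger offsets are assigned to logarithmically spaced buckets up to max_distance.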
669
+
670
+ def _cal_pos_emb(
671
+ self,
672
+ dense_layer: keras.layers.Dense,
673
+ position_ids: tf.Tensor,
674
+ num_buckets: int,
675
+ max_distance: int,
676
+ ):
677
+ rel_pos_matrix = tf.expand_dims(position_ids, axis=-2) - tf.expand_dims(position_ids, axis=-1)
678
+ rel_pos = self.relative_position_bucket(rel_pos_matrix, num_buckets, max_distance)
679
+ rel_pos_one_hot = tf.one_hot(rel_pos, depth=num_buckets, dtype=self.compute_dtype)
680
+ embedding = dense_layer(rel_pos_one_hot)
681
+ # batch_size, seq_length, seq_length, num_heads --> batch_size, num_heads, seq_length, seq_length
682
+ embedding = tf.transpose(embedding, [0, 3, 1, 2])
683
+ embedding = tf.cast(embedding, dtype=self.compute_dtype)
684
+ return embedding
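+ # Illustrative note: the one-hot bucket indices followed by the bias-free Dense layer act as an embedding lookup
+ # that produces one scalar bias per attention head for every (query, key) pair; the transpose arranges the result
+ # as (batch_size, num_heads, seq_length, seq_length) so it can be added directly to the attention scores.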
685
+
686
+ def _cal_1d_pos_emb(self, position_ids: tf.Tensor):
687
+ return self._cal_pos_emb(self.rel_pos_bias, position_ids, self.rel_pos_bins, self.max_rel_pos)
688
+
689
+ def _cal_2d_pos_emb(self, bbox: tf.Tensor):
690
+ position_coord_x = bbox[:, :, 0] # left
691
+ position_coord_y = bbox[:, :, 3] # bottom
692
+ rel_pos_x = self._cal_pos_emb(
693
+ self.rel_pos_x_bias,
694
+ position_coord_x,
695
+ self.rel_2d_pos_bins,
696
+ self.max_rel_2d_pos,
697
+ )
698
+ rel_pos_y = self._cal_pos_emb(
699
+ self.rel_pos_y_bias,
700
+ position_coord_y,
701
+ self.rel_2d_pos_bins,
702
+ self.max_rel_2d_pos,
703
+ )
704
+ rel_2d_pos = rel_pos_x + rel_pos_y
705
+ return rel_2d_pos
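+ # Illustrative note: the 2D bias buckets pairwise differences of the left x-coordinates (bbox[:, :, 0]) and the
+ # bottom y-coordinates (bbox[:, :, 3]) separately, then sums the two per-head bias maps.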
706
+
707
+ def call(
708
+ self,
709
+ hidden_states: tf.Tensor,
710
+ bbox: tf.Tensor | None = None,
711
+ attention_mask: tf.Tensor | None = None,
712
+ head_mask: tf.Tensor | None = None,
713
+ output_attentions: bool = False,
714
+ output_hidden_states: bool = False,
715
+ return_dict: bool = True,
716
+ position_ids: tf.Tensor | None = None,
717
+ training: bool = False,
718
+ ) -> Union[
719
+ TFBaseModelOutput,
720
+ Tuple[tf.Tensor],
721
+ Tuple[tf.Tensor, tf.Tensor],
722
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
723
+ ]:
724
+ all_hidden_states = () if output_hidden_states else None
725
+ all_self_attentions = () if output_attentions else None
726
+
727
+ rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None
728
+ rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None
729
+
730
+ for i, layer_module in enumerate(self.layer):
731
+ if output_hidden_states:
732
+ all_hidden_states = all_hidden_states + (hidden_states,)
733
+
734
+ layer_head_mask = head_mask[i] if head_mask is not None else None
735
+
736
+ layer_outputs = layer_module(
737
+ hidden_states,
738
+ attention_mask,
739
+ layer_head_mask,
740
+ output_attentions,
741
+ rel_pos=rel_pos,
742
+ rel_2d_pos=rel_2d_pos,
743
+ training=training,
744
+ )
745
+
746
+ hidden_states = layer_outputs[0]
747
+ if output_attentions:
748
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
749
+
750
+ if output_hidden_states:
751
+ all_hidden_states = all_hidden_states + (hidden_states,)
752
+
753
+ if return_dict:
754
+ return TFBaseModelOutput(
755
+ last_hidden_state=hidden_states,
756
+ hidden_states=all_hidden_states,
757
+ attentions=all_self_attentions,
758
+ )
759
+ else:
760
+ return tuple(
761
+ value for value in [hidden_states, all_hidden_states, all_self_attentions] if value is not None
762
+ )
763
+
764
+ def build(self, input_shape=None):
765
+ if self.built:
766
+ return
767
+ self.built = True
768
+ if getattr(self, "rel_pos_bias", None) is not None:
769
+ with tf.name_scope(self.rel_pos_bias.name):
770
+ self.rel_pos_bias.build([None, None, self.rel_pos_bins])
771
+ if getattr(self, "rel_pos_x_bias", None) is not None:
772
+ with tf.name_scope(self.rel_pos_x_bias.name):
773
+ self.rel_pos_x_bias.build([None, None, self.rel_2d_pos_bins])
774
+ if getattr(self, "rel_pos_y_bias", None) is not None:
775
+ with tf.name_scope(self.rel_pos_y_bias.name):
776
+ self.rel_pos_y_bias.build([None, None, self.rel_2d_pos_bins])
777
+ if getattr(self, "layer", None) is not None:
778
+ for layer in self.layer:
779
+ with tf.name_scope(layer.name):
780
+ layer.build(None)
781
+
782
+
783
+ @keras_serializable
784
+ class TFLayoutLMv3MainLayer(keras.layers.Layer):
785
+ config_class = LayoutLMv3Config
786
+
787
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
788
+ super().__init__(**kwargs)
789
+
790
+ self.config = config
791
+
792
+ if config.text_embed:
793
+ self.embeddings = TFLayoutLMv3TextEmbeddings(config, name="embeddings")
794
+
795
+ if config.visual_embed:
796
+ self.patch_embed = TFLayoutLMv3PatchEmbeddings(config, name="patch_embed")
797
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
798
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
799
+
800
+ if config.has_relative_attention_bias or config.has_spatial_attention_bias:
801
+ image_size = config.input_size // config.patch_size
802
+ self.init_visual_bbox(image_size=(image_size, image_size))
803
+
804
+ self.norm = keras.layers.LayerNormalization(epsilon=1e-6, name="norm")
805
+
806
+ self.encoder = TFLayoutLMv3Encoder(config, name="encoder")
807
+
808
+ def build(self, input_shape=None):
809
+ if self.config.visual_embed:
810
+ image_size = self.config.input_size // self.config.patch_size
811
+ self.cls_token = self.add_weight(
812
+ shape=(1, 1, self.config.hidden_size),
813
+ initializer="zeros",
814
+ trainable=True,
815
+ dtype=tf.float32,
816
+ name="cls_token",
817
+ )
818
+ self.pos_embed = self.add_weight(
819
+ shape=(1, image_size * image_size + 1, self.config.hidden_size),
820
+ initializer="zeros",
821
+ trainable=True,
822
+ dtype=tf.float32,
823
+ name="pos_embed",
824
+ )
825
+
826
+ if self.built:
827
+ return
828
+ self.built = True
829
+ if getattr(self, "encoder", None) is not None:
830
+ with tf.name_scope(self.encoder.name):
831
+ self.encoder.build(None)
832
+ if getattr(self, "embeddings", None) is not None:
833
+ with tf.name_scope(self.embeddings.name):
834
+ self.embeddings.build(None)
835
+ if getattr(self, "patch_embed", None) is not None:
836
+ with tf.name_scope(self.patch_embed.name):
837
+ self.patch_embed.build(None)
838
+ if getattr(self, "LayerNorm", None) is not None:
839
+ with tf.name_scope(self.LayerNorm.name):
840
+ self.LayerNorm.build([None, None, self.config.hidden_size])
841
+ if getattr(self, "dropout", None) is not None:
842
+ with tf.name_scope(self.dropout.name):
843
+ self.dropout.build(None)
844
+ if getattr(self, "norm", None) is not None:
845
+ with tf.name_scope(self.norm.name):
846
+ self.norm.build([None, None, self.config.hidden_size])
847
+
848
+ def get_input_embeddings(self) -> keras.layers.Layer:
849
+ return self.embeddings.word_embeddings
850
+
851
+ def set_input_embeddings(self, value: tf.Variable):
852
+ self.embeddings.word_embeddings.weight = value
853
+
854
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
855
+ def _prune_heads(self, heads_to_prune):
856
+ """
857
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
858
+ class PreTrainedModel
859
+ """
860
+ raise NotImplementedError
861
+
862
+ def init_visual_bbox(self, image_size: Tuple[int, int], max_len: int = 1000):
863
+ # We should not hardcode max_len to 1000, but it is done by the reference implementation,
864
+ # so we keep it for compatibility with the pretrained weights. The more correct approach
865
+ # would have been to pass on max_len=config.max_2d_position_embeddings - 1.
866
+ height, width = image_size
867
+
868
+ visual_bbox_x = tf.range(0, max_len * (width + 1), max_len) // width
869
+ visual_bbox_x = tf.expand_dims(visual_bbox_x, axis=0)
870
+ visual_bbox_x = tf.tile(visual_bbox_x, [width, 1]) # (width, width + 1)
871
+
872
+ visual_bbox_y = tf.range(0, max_len * (height + 1), max_len) // height
873
+ visual_bbox_y = tf.expand_dims(visual_bbox_y, axis=1)
874
+ visual_bbox_y = tf.tile(visual_bbox_y, [1, height]) # (height + 1, height)
875
+
876
+ visual_bbox = tf.stack(
877
+ [visual_bbox_x[:, :-1], visual_bbox_y[:-1], visual_bbox_x[:, 1:], visual_bbox_y[1:]],
878
+ axis=-1,
879
+ )
880
+ visual_bbox = tf.reshape(visual_bbox, [-1, 4])
881
+
882
+ cls_token_box = tf.constant([[1, 1, max_len - 1, max_len - 1]], dtype=tf.int32)
883
+ self.visual_bbox = tf.concat([cls_token_box, visual_bbox], axis=0)
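+ # Illustrative note: with input_size=224 and patch_size=16 the patch grid is 14 x 14, so `self.visual_bbox`
+ # holds 14 * 14 + 1 = 197 boxes: a [1, 1, max_len - 1, max_len - 1] box for the visual [CLS] token followed by
+ # one box per image patch on an approximately 0-1000 coordinate grid.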
884
+
885
+ def calculate_visual_bbox(self, batch_size: int, dtype: tf.DType):
886
+ visual_bbox = tf.expand_dims(self.visual_bbox, axis=0)
887
+ visual_bbox = tf.tile(visual_bbox, [batch_size, 1, 1])
888
+ visual_bbox = tf.cast(visual_bbox, dtype=dtype)
889
+ return visual_bbox
890
+
891
+ def embed_image(self, pixel_values: tf.Tensor) -> tf.Tensor:
892
+ embeddings = self.patch_embed(pixel_values)
893
+
894
+ # add [CLS] token
895
+ batch_size = tf.shape(embeddings)[0]
896
+ cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1])
897
+ embeddings = tf.concat([cls_tokens, embeddings], axis=1)
898
+
899
+ # add position embeddings
900
+ if getattr(self, "pos_embed", None) is not None:
901
+ embeddings += self.pos_embed
902
+
903
+ embeddings = self.norm(embeddings)
904
+ return embeddings
905
+
906
+ def get_extended_attention_mask(self, attention_mask: tf.Tensor) -> tf.Tensor:
907
+ # Adapted from transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask
908
+
909
+ n_dims = len(attention_mask.shape)
910
+
911
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
912
+ # ourselves in which case we just need to make it broadcastable to all heads.
913
+ if n_dims == 3:
914
+ extended_attention_mask = tf.expand_dims(attention_mask, axis=1)
915
+ elif n_dims == 2:
916
+ # Provided a padding mask of dimensions [batch_size, seq_length].
917
+ # Make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length].
918
+ extended_attention_mask = tf.expand_dims(attention_mask, axis=1) # (batch_size, 1, seq_length)
919
+ extended_attention_mask = tf.expand_dims(extended_attention_mask, axis=1) # (batch_size, 1, 1, seq_length)
920
+ else:
921
+ raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape}).")
922
+
923
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
924
+ # masked positions, this operation will create a tensor which is 0.0 for
925
+ # positions we want to attend and -10000.0 for masked positions.
926
+ # Since we are adding it to the raw scores before the softmax, this is
927
+ # effectively the same as removing these entirely.
928
+ extended_attention_mask = tf.cast(extended_attention_mask, self.compute_dtype)
929
+ extended_attention_mask = (1.0 - extended_attention_mask) * LARGE_NEGATIVE
930
+
931
+ return extended_attention_mask
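+ # Illustrative example: a padding mask of [1, 1, 0] becomes additive biases [0.0, 0.0, -1e8] broadcast to
+ # (batch_size, 1, 1, seq_length), so masked positions contribute essentially nothing after the softmax.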
932
+
933
+ def get_head_mask(self, head_mask: tf.Tensor | None) -> Union[tf.Tensor, List[tf.Tensor | None]]:
934
+ if head_mask is None:
935
+ return [None] * self.config.num_hidden_layers
936
+
937
+ n_dims = tf.rank(head_mask)
938
+ if n_dims == 1:
939
+ # Gets a tensor with masks for each head (H).
940
+ head_mask = tf.expand_dims(head_mask, axis=0) # 1, num_heads
941
+ head_mask = tf.expand_dims(head_mask, axis=0) # 1, 1, num_heads
942
+ head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1
943
+ head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1, 1
944
+ head_mask = tf.tile(
945
+ head_mask, [self.config.num_hidden_layers, 1, 1, 1, 1]
946
+ ) # num_hidden_layers, 1, num_heads, 1, 1
947
+ elif n_dims == 2:
948
+ # Gets a tensor with masks for each layer (L) and head (H).
949
+ head_mask = tf.expand_dims(head_mask, axis=1) # num_hidden_layers, 1, num_heads
950
+ head_mask = tf.expand_dims(head_mask, axis=-1) # num_hidden_layers, 1, num_heads, 1
951
+ head_mask = tf.expand_dims(head_mask, axis=-1) # num_hidden_layers, 1, num_heads, 1, 1
952
+ elif n_dims != 5:
953
+ raise ValueError(f"Wrong shape for head_mask (shape {head_mask.shape}).")
954
+ assert tf.rank(head_mask) == 5, f"Got head_mask rank of {tf.rank(head_mask)}, but require 5."
955
+ head_mask = tf.cast(head_mask, self.compute_dtype)
956
+ return head_mask
957
+
958
+ @unpack_inputs
959
+ def call(
960
+ self,
961
+ input_ids: tf.Tensor | None = None,
962
+ bbox: tf.Tensor | None = None,
963
+ attention_mask: tf.Tensor | None = None,
964
+ token_type_ids: tf.Tensor | None = None,
965
+ position_ids: tf.Tensor | None = None,
966
+ head_mask: tf.Tensor | None = None,
967
+ inputs_embeds: tf.Tensor | None = None,
968
+ pixel_values: tf.Tensor | None = None,
969
+ output_attentions: Optional[bool] = None,
970
+ output_hidden_states: Optional[bool] = None,
971
+ return_dict: Optional[bool] = None,
972
+ training: bool = False,
973
+ ) -> Union[
974
+ TFBaseModelOutput,
975
+ Tuple[tf.Tensor],
976
+ Tuple[tf.Tensor, tf.Tensor],
977
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
978
+ ]:
979
+ # This method can be called with a variety of modalities:
980
+ # 1. text + layout
981
+ # 2. text + layout + image
982
+ # 3. image
983
+ # The complexity of this method is mostly just due to handling of these different modalities.
984
+
985
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
986
+ output_hidden_states = (
987
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
988
+ )
989
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
990
+
991
+ if input_ids is not None:
992
+ input_shape = tf.shape(input_ids)
993
+ batch_size = input_shape[0]
994
+ seq_length = input_shape[1]
995
+ elif inputs_embeds is not None:
996
+ input_shape = tf.shape(inputs_embeds)
997
+ batch_size = input_shape[0]
998
+ seq_length = input_shape[1]
999
+ elif pixel_values is not None:
1000
+ batch_size = tf.shape(pixel_values)[0]
1001
+ else:
1002
+ raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")
1003
+
1004
+ # Determine which integer dtype to use.
1005
+ if input_ids is not None:
1006
+ int_dtype = input_ids.dtype
1007
+ elif bbox is not None:
1008
+ int_dtype = bbox.dtype
1009
+ elif attention_mask is not None:
1010
+ int_dtype = attention_mask.dtype
1011
+ elif token_type_ids is not None:
1012
+ int_dtype = token_type_ids.dtype
1013
+ else:
1014
+ int_dtype = tf.int32
1015
+
1016
+ if input_ids is not None or inputs_embeds is not None:
1017
+ if attention_mask is None:
1018
+ attention_mask = tf.ones((batch_size, seq_length), dtype=int_dtype)
1019
+ if token_type_ids is None:
1020
+ token_type_ids = tf.zeros((batch_size, seq_length), dtype=int_dtype)
1021
+ if bbox is None:
1022
+ bbox = tf.zeros((batch_size, seq_length, 4), dtype=int_dtype)
1023
+
1024
+ embedding_output = self.embeddings(
1025
+ input_ids=input_ids,
1026
+ bbox=bbox,
1027
+ position_ids=position_ids,
1028
+ token_type_ids=token_type_ids,
1029
+ inputs_embeds=inputs_embeds,
1030
+ training=training,
1031
+ )
1032
+
1033
+ final_bbox = None
1034
+ final_position_ids = None
1035
+ if pixel_values is not None:
1036
+ # embed image
1037
+ visual_embeddings = self.embed_image(pixel_values)
1038
+
1039
+ # calculate attention mask
1040
+ visual_attention_mask = tf.ones((batch_size, tf.shape(visual_embeddings)[1]), dtype=int_dtype)
1041
+ if attention_mask is None:
1042
+ attention_mask = visual_attention_mask
1043
+ else:
1044
+ attention_mask = tf.concat([attention_mask, visual_attention_mask], axis=1)
1045
+
1046
+ # calculate bounding boxes
1047
+ if self.config.has_spatial_attention_bias:
1048
+ visual_bbox = self.calculate_visual_bbox(batch_size, int_dtype)
1049
+ if bbox is None:
1050
+ final_bbox = visual_bbox
1051
+ else:
1052
+ final_bbox = tf.concat([bbox, visual_bbox], axis=1)
1053
+
1054
+ # calculate position IDs
1055
+ if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
1056
+ visual_position_ids = tf.range(0, tf.shape(visual_embeddings)[1], dtype=int_dtype)
1057
+ visual_position_ids = tf.expand_dims(visual_position_ids, axis=0)
1058
+ visual_position_ids = tf.tile(visual_position_ids, [batch_size, 1])
1059
+
1060
+ if input_ids is not None or inputs_embeds is not None:
1061
+ position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0)
1062
+ position_ids = tf.tile(position_ids, [batch_size, 1])
1063
+ final_position_ids = tf.concat([position_ids, visual_position_ids], axis=1)
1064
+ else:
1065
+ final_position_ids = visual_position_ids
1066
+
1067
+ # calculate embeddings
1068
+ if input_ids is None and inputs_embeds is None:
1069
+ embedding_output = visual_embeddings
1070
+ else:
1071
+ embedding_output = tf.concat([embedding_output, visual_embeddings], axis=1)
1072
+ embedding_output = self.LayerNorm(embedding_output)
1073
+ embedding_output = self.dropout(embedding_output, training=training)
1074
+
1075
+ elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
1076
+ if self.config.has_relative_attention_bias:
1077
+ position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0)
1078
+ position_ids = tf.tile(position_ids, [batch_size, 1])
1079
+ final_position_ids = position_ids
1080
+
1081
+ if self.config.has_spatial_attention_bias:
1082
+ final_bbox = bbox
1083
+
1084
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask)
1085
+
1086
+ # Prepare head mask if needed
1087
+ # 1.0 in head_mask indicate we keep the head
1088
+ # attention_probs has shape batch_size x num_heads x seq_length x seq_length
1089
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1090
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1091
+ head_mask = self.get_head_mask(head_mask)
1092
+
1093
+ encoder_outputs = self.encoder(
1094
+ embedding_output,
1095
+ bbox=final_bbox,
1096
+ position_ids=final_position_ids,
1097
+ attention_mask=extended_attention_mask,
1098
+ head_mask=head_mask,
1099
+ output_attentions=output_attentions,
1100
+ output_hidden_states=output_hidden_states,
1101
+ return_dict=return_dict,
1102
+ )
1103
+
1104
+ sequence_output = encoder_outputs[0]
1105
+
1106
+ if not return_dict:
1107
+ return (sequence_output,) + encoder_outputs[1:]
1108
+
1109
+ return TFBaseModelOutput(
1110
+ last_hidden_state=sequence_output,
1111
+ hidden_states=encoder_outputs.hidden_states,
1112
+ attentions=encoder_outputs.attentions,
1113
+ )
1120
+
1121
+
1122
+ class TFLayoutLMv3PreTrainedModel(TFPreTrainedModel):
1123
+ """
1124
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1125
+ models.
1126
+ """
1127
+
1128
+ config_class = LayoutLMv3Config
1129
+ base_model_prefix = "layoutlmv3"
1130
+
1131
+ @property
1132
+ def input_signature(self):
1133
+ sig = super().input_signature
1134
+ sig["bbox"] = tf.TensorSpec((None, None, 4), tf.int32, name="bbox")
1135
+ return sig
1136
+
1137
+
1138
+ LAYOUTLMV3_START_DOCSTRING = r"""
1139
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1140
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1141
+ etc.)
1142
+
1143
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1144
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1145
+ behavior.
1146
+
1147
+ <Tip>
1148
+
1149
+ TensorFlow models and layers in `transformers` accept two formats as input:
1150
+
1151
+ - having all inputs as keyword arguments (like PyTorch models), or
1152
+ - having all inputs as a list, tuple or dict in the first positional argument.
1153
+
1154
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1155
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1156
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1157
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1158
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1159
+ positional argument:
1160
+
1161
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1162
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1163
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1164
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1165
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1166
+
1167
+ Note that when creating models and layers with
1168
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1169
+ about any of this, as you can just pass inputs like you would to any other Python function!
1170
+
1171
+ </Tip>
1172
+
1173
+ Parameters:
1174
+ config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.
1175
+ Initializing with a config file does not load the weights associated with the model, only the
1176
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
1177
+ """
1178
+
1179
+ LAYOUTLMV3_INPUTS_DOCSTRING = r"""
1180
+ Args:
1181
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
1182
+ Indices of input sequence tokens in the vocabulary.
1183
+
1184
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
1185
+ token. See `pixel_values` for `patch_sequence_length`.
1186
+
1187
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1188
+ [`PreTrainedTokenizer.__call__`] for details.
1189
+
1190
+ [What are input IDs?](../glossary#input-ids)
1191
+
1192
+ bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
1193
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
1194
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
1195
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
1196
+ y1) represents the position of the lower right corner.
1197
+
1198
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
1199
+ token. See `pixel_values` for `patch_sequence_length`.
1200
+
1201
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
1202
+ Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
1203
+ config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals `((height /
1204
+ config.patch_size) * (width / config.patch_size))`.
1205
+
1206
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1207
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1208
+
1209
+ - 1 for tokens that are **not masked**,
1210
+ - 0 for tokens that are **masked**.
1211
+
1212
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
1213
+ token. See `pixel_values` for `patch_sequence_length`.
1214
+
1215
+ [What are attention masks?](../glossary#attention-mask)
1216
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1217
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1218
+ 1]`:
1219
+
1220
+ - 0 corresponds to a *sentence A* token,
1221
+ - 1 corresponds to a *sentence B* token.
1222
+
1223
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
1224
+ token. See `pixel_values` for `patch_sequence_length`.
1225
+
1226
+ [What are token type IDs?](../glossary#token-type-ids)
1227
+ position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1228
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1229
+ config.max_position_embeddings - 1]`.
1230
+
1231
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
1232
+ token. See `pixel_values` for `patch_sequence_length`.
1233
+
1234
+ [What are position IDs?](../glossary#position-ids)
1235
+ head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1236
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1237
+
1238
+ - 1 indicates the head is **not masked**,
1239
+ - 0 indicates the head is **masked**.
1240
+
1241
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1242
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1243
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
1244
+ model's internal embedding lookup matrix.
1245
+ output_attentions (`bool`, *optional*):
1246
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1247
+ tensors for more detail.
1248
+ output_hidden_states (`bool`, *optional*):
1249
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1250
+ more detail.
1251
+ return_dict (`bool`, *optional*):
1252
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1253
+ """
1254
+
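+ # A minimal arithmetic sketch of the `patch_sequence_length` referenced in the docstring
+ # above, assuming 224x224 images and `patch_size=16` (illustrative values only):
+ #
+ #     height = width = 224
+ #     patch_size = 16
+ #     patch_sequence_length = (height // patch_size) * (width // patch_size)  # 14 * 14 = 196
+ #     # total length seen by the model: token_sequence_length + 196 + 1 (the extra 1 is the visual [CLS])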
1255
+
1256
+ @add_start_docstrings(
1257
+ "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.",
1258
+ LAYOUTLMV3_START_DOCSTRING,
1259
+ )
1260
+ class TFLayoutLMv3Model(TFLayoutLMv3PreTrainedModel):
1261
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1262
+ _keys_to_ignore_on_load_unexpected = [r"position_ids"]
1263
+
1264
+ def __init__(self, config, *inputs, **kwargs):
1265
+ super().__init__(config, *inputs, **kwargs)
1266
+ self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
1267
+
1268
+ @unpack_inputs
1269
+ @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
1270
+ @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
1271
+ def call(
1272
+ self,
1273
+ input_ids: tf.Tensor | None = None,
1274
+ bbox: tf.Tensor | None = None,
1275
+ attention_mask: tf.Tensor | None = None,
1276
+ token_type_ids: tf.Tensor | None = None,
1277
+ position_ids: tf.Tensor | None = None,
1278
+ head_mask: tf.Tensor | None = None,
1279
+ inputs_embeds: tf.Tensor | None = None,
1280
+ pixel_values: tf.Tensor | None = None,
1281
+ output_attentions: Optional[bool] = None,
1282
+ output_hidden_states: Optional[bool] = None,
1283
+ return_dict: Optional[bool] = None,
1284
+ training: bool = False,
1285
+ ) -> Union[
1286
+ TFBaseModelOutput,
1287
+ Tuple[tf.Tensor],
1288
+ Tuple[tf.Tensor, tf.Tensor],
1289
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
1290
+ ]:
1291
+ r"""
1292
+ Returns:
1293
+
1294
+ Examples:
1295
+
1296
+ ```python
1297
+ >>> from transformers import AutoProcessor, TFAutoModel
1298
+ >>> from datasets import load_dataset
1299
+
1300
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1301
+ >>> model = TFAutoModel.from_pretrained("microsoft/layoutlmv3-base")
1302
+
1303
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1304
+ >>> example = dataset[0]
1305
+ >>> image = example["image"]
1306
+ >>> words = example["tokens"]
1307
+ >>> boxes = example["bboxes"]
1308
+
1309
+ >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf")
1310
+
1311
+ >>> outputs = model(**encoding)
1312
+ >>> last_hidden_states = outputs.last_hidden_state
1313
+ ```"""
1314
+
1315
+ outputs = self.layoutlmv3(
1316
+ input_ids=input_ids,
1317
+ bbox=bbox,
1318
+ attention_mask=attention_mask,
1319
+ token_type_ids=token_type_ids,
1320
+ position_ids=position_ids,
1321
+ head_mask=head_mask,
1322
+ inputs_embeds=inputs_embeds,
1323
+ pixel_values=pixel_values,
1324
+ output_attentions=output_attentions,
1325
+ output_hidden_states=output_hidden_states,
1326
+ return_dict=return_dict,
1327
+ training=training,
1328
+ )
1329
+
1330
+ return outputs
1331
+
1332
+ def build(self, input_shape=None):
1333
+ if self.built:
1334
+ return
1335
+ self.built = True
1336
+ if getattr(self, "layoutlmv3", None) is not None:
1337
+ with tf.name_scope(self.layoutlmv3.name):
1338
+ self.layoutlmv3.build(None)
1339
+
1340
+
1341
+ class TFLayoutLMv3ClassificationHead(keras.layers.Layer):
1342
+ """
1343
+ Head for sentence-level classification tasks. Reference: RobertaClassificationHead
1344
+ """
1345
+
1346
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
1347
+ super().__init__(**kwargs)
1348
+ self.dense = keras.layers.Dense(
1349
+ config.hidden_size,
1350
+ activation="tanh",
1351
+ kernel_initializer=get_initializer(config.initializer_range),
1352
+ name="dense",
1353
+ )
1354
+ classifier_dropout = (
1355
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1356
+ )
1357
+ self.dropout = keras.layers.Dropout(
1358
+ classifier_dropout,
1359
+ name="dropout",
1360
+ )
1361
+ self.out_proj = keras.layers.Dense(
1362
+ config.num_labels,
1363
+ kernel_initializer=get_initializer(config.initializer_range),
1364
+ name="out_proj",
1365
+ )
1366
+ self.config = config
1367
+
1368
+ def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
1369
+ outputs = self.dropout(inputs, training=training)
1370
+ outputs = self.dense(outputs)
1371
+ outputs = self.dropout(outputs, training=training)
1372
+ outputs = self.out_proj(outputs)
1373
+ return outputs
1374
+
1375
+ def build(self, input_shape=None):
1376
+ if self.built:
1377
+ return
1378
+ self.built = True
1379
+ if getattr(self, "dense", None) is not None:
1380
+ with tf.name_scope(self.dense.name):
1381
+ self.dense.build([None, None, self.config.hidden_size])
1382
+ if getattr(self, "dropout", None) is not None:
1383
+ with tf.name_scope(self.dropout.name):
1384
+ self.dropout.build(None)
1385
+ if getattr(self, "out_proj", None) is not None:
1386
+ with tf.name_scope(self.out_proj.name):
1387
+ self.out_proj.build([None, None, self.config.hidden_size])
1388
+
1389
+
1390
+ @add_start_docstrings(
1391
+ """
1392
+ LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
1393
+ [CLS] token) e.g. for document image classification tasks such as the
1394
+ [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
1395
+ """,
1396
+ LAYOUTLMV3_START_DOCSTRING,
1397
+ )
1398
+ class TFLayoutLMv3ForSequenceClassification(TFLayoutLMv3PreTrainedModel, TFSequenceClassificationLoss):
1399
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1400
+ _keys_to_ignore_on_load_unexpected = [r"position_ids"]
1401
+
1402
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
1403
+ super().__init__(config, **kwargs)
1404
+ self.config = config
1405
+ self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
1406
+ self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier")
1407
+
1408
+ @unpack_inputs
1409
+ @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
1410
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1411
+ def call(
1412
+ self,
1413
+ input_ids: tf.Tensor | None = None,
1414
+ attention_mask: tf.Tensor | None = None,
1415
+ token_type_ids: tf.Tensor | None = None,
1416
+ position_ids: tf.Tensor | None = None,
1417
+ head_mask: tf.Tensor | None = None,
1418
+ inputs_embeds: tf.Tensor | None = None,
1419
+ labels: tf.Tensor | None = None,
1420
+ output_attentions: Optional[bool] = None,
1421
+ output_hidden_states: Optional[bool] = None,
1422
+ return_dict: Optional[bool] = None,
1423
+ bbox: tf.Tensor | None = None,
1424
+ pixel_values: tf.Tensor | None = None,
1425
+ training: Optional[bool] = False,
1426
+ ) -> Union[
1427
+ TFSequenceClassifierOutput,
1428
+ Tuple[tf.Tensor],
1429
+ Tuple[tf.Tensor, tf.Tensor],
1430
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
1431
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
1432
+ ]:
1433
+ """
1434
+ Returns:
1435
+
1436
+ Examples:
1437
+
1438
+ ```python
1439
+ >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification
1440
+ >>> from datasets import load_dataset
1441
+ >>> import tensorflow as tf
1442
+
1443
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1444
+ >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
1445
+
1446
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1447
+ >>> example = dataset[0]
1448
+ >>> image = example["image"]
1449
+ >>> words = example["tokens"]
1450
+ >>> boxes = example["bboxes"]
1451
+
1452
+ >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf")
1453
+ >>> sequence_label = tf.convert_to_tensor([1])
1454
+
1455
+ >>> outputs = model(**encoding, labels=sequence_label)
1456
+ >>> loss = outputs.loss
1457
+ >>> logits = outputs.logits
1458
+ ```"""
1459
+
1460
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1461
+
1462
+ outputs = self.layoutlmv3(
1463
+ input_ids,
1464
+ attention_mask=attention_mask,
1465
+ token_type_ids=token_type_ids,
1466
+ position_ids=position_ids,
1467
+ head_mask=head_mask,
1468
+ inputs_embeds=inputs_embeds,
1469
+ output_attentions=output_attentions,
1470
+ output_hidden_states=output_hidden_states,
1471
+ return_dict=return_dict,
1472
+ bbox=bbox,
1473
+ pixel_values=pixel_values,
1474
+ training=training,
1475
+ )
1476
+ sequence_output = outputs[0][:, 0, :]
1477
+ logits = self.classifier(sequence_output, training=training)
1478
+
1479
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1480
+
1481
+ if not return_dict:
1482
+ output = (logits,) + outputs[1:]
1483
+ return ((loss,) + output) if loss is not None else output
1484
+
1485
+ return TFSequenceClassifierOutput(
1486
+ loss=loss,
1487
+ logits=logits,
1488
+ hidden_states=outputs.hidden_states,
1489
+ attentions=outputs.attentions,
1490
+ )
1491
+
1492
+ def build(self, input_shape=None):
1493
+ if self.built:
1494
+ return
1495
+ self.built = True
1496
+ if getattr(self, "layoutlmv3", None) is not None:
1497
+ with tf.name_scope(self.layoutlmv3.name):
1498
+ self.layoutlmv3.build(None)
1499
+ if getattr(self, "classifier", None) is not None:
1500
+ with tf.name_scope(self.classifier.name):
1501
+ self.classifier.build(None)
1502
+
1503
+
1504
+ @add_start_docstrings(
1505
+ """
1506
+ LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.
1507
+ for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),
1508
+ [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and
1509
+ [Kleister-NDA](https://github.com/applicaai/kleister-nda).
1510
+ """,
1511
+ LAYOUTLMV3_START_DOCSTRING,
1512
+ )
1513
+ class TFLayoutLMv3ForTokenClassification(TFLayoutLMv3PreTrainedModel, TFTokenClassificationLoss):
1514
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1515
+ _keys_to_ignore_on_load_unexpected = [r"position_ids"]
1516
+
1517
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
1518
+ super().__init__(config, **kwargs)
1519
+ self.num_labels = config.num_labels
1520
+
1521
+ self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
1522
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
1523
+ if config.num_labels < 10:
1524
+ self.classifier = keras.layers.Dense(
1525
+ config.num_labels,
1526
+ kernel_initializer=get_initializer(config.initializer_range),
1527
+ name="classifier",
1528
+ )
1529
+ else:
1530
+ self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier")
1531
+ self.config = config
1532
+
1533
+ @unpack_inputs
1534
+ @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
1535
+ @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1536
+ def call(
1537
+ self,
1538
+ input_ids: tf.Tensor | None = None,
1539
+ bbox: tf.Tensor | None = None,
1540
+ attention_mask: tf.Tensor | None = None,
1541
+ token_type_ids: tf.Tensor | None = None,
1542
+ position_ids: tf.Tensor | None = None,
1543
+ head_mask: tf.Tensor | None = None,
1544
+ inputs_embeds: tf.Tensor | None = None,
1545
+ labels: tf.Tensor | None = None,
1546
+ output_attentions: Optional[bool] = None,
1547
+ output_hidden_states: Optional[bool] = None,
1548
+ return_dict: Optional[bool] = None,
1549
+ pixel_values: tf.Tensor | None = None,
1550
+ training: Optional[bool] = False,
1551
+ ) -> Union[
1552
+ TFTokenClassifierOutput,
1553
+ Tuple[tf.Tensor],
1554
+ Tuple[tf.Tensor, tf.Tensor],
1555
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
1556
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
1557
+ ]:
1558
+ r"""
1559
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1560
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1561
+
1562
+ Returns:
1563
+
1564
+ Examples:
1565
+
1566
+ ```python
1567
+ >>> from transformers import AutoProcessor, TFAutoModelForTokenClassification
1568
+ >>> from datasets import load_dataset
1569
+
1570
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1571
+ >>> model = TFAutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
1572
+
1573
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1574
+ >>> example = dataset[0]
1575
+ >>> image = example["image"]
1576
+ >>> words = example["tokens"]
1577
+ >>> boxes = example["bboxes"]
1578
+ >>> word_labels = example["ner_tags"]
1579
+
1580
+ >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="tf")
1581
+
1582
+ >>> outputs = model(**encoding)
1583
+ >>> loss = outputs.loss
1584
+ >>> logits = outputs.logits
1585
+ ```"""
1586
+
1587
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1588
+
1589
+ outputs = self.layoutlmv3(
1590
+ input_ids,
1591
+ bbox=bbox,
1592
+ attention_mask=attention_mask,
1593
+ token_type_ids=token_type_ids,
1594
+ position_ids=position_ids,
1595
+ head_mask=head_mask,
1596
+ inputs_embeds=inputs_embeds,
1597
+ output_attentions=output_attentions,
1598
+ output_hidden_states=output_hidden_states,
1599
+ return_dict=return_dict,
1600
+ pixel_values=pixel_values,
1601
+ training=training,
1602
+ )
1603
+ if input_ids is not None:
1604
+ input_shape = tf.shape(input_ids)
1605
+ else:
1606
+ input_shape = tf.shape(inputs_embeds)[:-1]
1607
+
1608
+ seq_length = input_shape[1]
1609
+ # only take the text part of the output representations
1610
+ sequence_output = outputs[0][:, :seq_length]
1611
+ sequence_output = self.dropout(sequence_output, training=training)
1612
+ logits = self.classifier(sequence_output)
1613
+
1614
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1615
+
1616
+ if not return_dict:
1617
+ output = (logits,) + outputs[1:]
1618
+ return ((loss,) + output) if loss is not None else output
1619
+
1620
+ return TFTokenClassifierOutput(
1621
+ loss=loss,
1622
+ logits=logits,
1623
+ hidden_states=outputs.hidden_states,
1624
+ attentions=outputs.attentions,
1625
+ )
1626
+
1627
+ def build(self, input_shape=None):
1628
+ if self.built:
1629
+ return
1630
+ self.built = True
1631
+ if getattr(self, "layoutlmv3", None) is not None:
1632
+ with tf.name_scope(self.layoutlmv3.name):
1633
+ self.layoutlmv3.build(None)
1634
+ if getattr(self, "dropout", None) is not None:
1635
+ with tf.name_scope(self.dropout.name):
1636
+ self.dropout.build(None)
1637
+ if getattr(self, "classifier", None) is not None:
1638
+ with tf.name_scope(self.classifier.name):
1639
+ self.classifier.build([None, None, self.config.hidden_size])
1640
+
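+ # Shape note for `TFLayoutLMv3ForTokenClassification` (derived from the slicing in `call`
+ # above): the encoder output covers text tokens, image patches and the visual [CLS], but
+ # only the first `seq_length` (text) positions are passed to the classifier, so `logits`
+ # has shape (batch_size, token_sequence_length, num_labels).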
1641
+
1642
+ @add_start_docstrings(
1643
+ """
1644
+ LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as
1645
+ [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
1646
+ compute `span start logits` and `span end logits`).
1647
+ """,
1648
+ LAYOUTLMV3_START_DOCSTRING,
1649
+ )
1650
+ class TFLayoutLMv3ForQuestionAnswering(TFLayoutLMv3PreTrainedModel, TFQuestionAnsweringLoss):
1651
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1652
+ _keys_to_ignore_on_load_unexpected = [r"position_ids"]
1653
+
1654
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
1655
+ super().__init__(config, **kwargs)
1656
+
1657
+ self.num_labels = config.num_labels
1658
+
1659
+ self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
1660
+ self.qa_outputs = TFLayoutLMv3ClassificationHead(config, name="qa_outputs")
1661
+
1662
+ @unpack_inputs
1663
+ @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
1664
+ @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1665
+ def call(
1666
+ self,
1667
+ input_ids: tf.Tensor | None = None,
1668
+ attention_mask: tf.Tensor | None = None,
1669
+ token_type_ids: tf.Tensor | None = None,
1670
+ position_ids: tf.Tensor | None = None,
1671
+ head_mask: tf.Tensor | None = None,
1672
+ inputs_embeds: tf.Tensor | None = None,
1673
+ start_positions: tf.Tensor | None = None,
1674
+ end_positions: tf.Tensor | None = None,
1675
+ output_attentions: Optional[bool] = None,
1676
+ output_hidden_states: Optional[bool] = None,
1677
+ bbox: tf.Tensor | None = None,
1678
+ pixel_values: tf.Tensor | None = None,
1679
+ return_dict: Optional[bool] = None,
1680
+ training: bool = False,
1681
+ ) -> Union[
1682
+ TFQuestionAnsweringModelOutput,
1683
+ Tuple[tf.Tensor],
1684
+ Tuple[tf.Tensor, tf.Tensor],
1685
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
1686
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
1687
+ ]:
1688
+ r"""
1689
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1690
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1691
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1692
+ are not taken into account for computing the loss.
1693
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1694
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1695
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1696
+ are not taken into account for computing the loss.
1697
+
1698
+ Returns:
1699
+
1700
+ Examples:
1701
+
1702
+ ```python
1703
+ >>> from transformers import AutoProcessor, TFAutoModelForQuestionAnswering
1704
+ >>> from datasets import load_dataset
1705
+ >>> import tensorflow as tf
1706
+
1707
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1708
+ >>> model = TFAutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
1709
+
1710
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1711
+ >>> example = dataset[0]
1712
+ >>> image = example["image"]
1713
+ >>> question = "what's his name?"
1714
+ >>> words = example["tokens"]
1715
+ >>> boxes = example["bboxes"]
1716
+
1717
+ >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="tf")
1718
+ >>> start_positions = tf.convert_to_tensor([1])
1719
+ >>> end_positions = tf.convert_to_tensor([3])
1720
+
1721
+ >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
1722
+ >>> loss = outputs.loss
1723
+ >>> start_scores = outputs.start_logits
1724
+ >>> end_scores = outputs.end_logits
1725
+ ```"""
1726
+
1727
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1728
+
1729
+ outputs = self.layoutlmv3(
1730
+ input_ids,
1731
+ attention_mask=attention_mask,
1732
+ token_type_ids=token_type_ids,
1733
+ position_ids=position_ids,
1734
+ head_mask=head_mask,
1735
+ inputs_embeds=inputs_embeds,
1736
+ output_attentions=output_attentions,
1737
+ output_hidden_states=output_hidden_states,
1738
+ return_dict=return_dict,
1739
+ bbox=bbox,
1740
+ pixel_values=pixel_values,
1741
+ training=training,
1742
+ )
1743
+
1744
+ sequence_output = outputs[0]
1745
+
1746
+ logits = self.qa_outputs(sequence_output, training=training)
1747
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1748
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1749
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1750
+
1751
+ loss = None
1752
+
1753
+ if start_positions is not None and end_positions is not None:
1754
+ labels = {"start_position": start_positions, "end_position": end_positions}
1755
+ loss = self.hf_compute_loss(labels, logits=(start_logits, end_logits))
1756
+
1757
+ if not return_dict:
1758
+ output = (start_logits, end_logits) + outputs[1:]
1759
+ return ((loss,) + output) if loss is not None else output
1760
+
1761
+ return TFQuestionAnsweringModelOutput(
1762
+ loss=loss,
1763
+ start_logits=start_logits,
1764
+ end_logits=end_logits,
1765
+ hidden_states=outputs.hidden_states,
1766
+ attentions=outputs.attentions,
1767
+ )
1768
+
1769
+ def build(self, input_shape=None):
1770
+ if self.built:
1771
+ return
1772
+ self.built = True
1773
+ if getattr(self, "layoutlmv3", None) is not None:
1774
+ with tf.name_scope(self.layoutlmv3.name):
1775
+ self.layoutlmv3.build(None)
1776
+ if getattr(self, "qa_outputs", None) is not None:
1777
+ with tf.name_scope(self.qa_outputs.name):
1778
+ self.qa_outputs.build(None)
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/processing_layoutlmv3.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for LayoutLMv3.
17
+ """
18
+
19
+ import warnings
20
+ from typing import List, Optional, Union
21
+
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
+ class LayoutLMv3Processor(ProcessorMixin):
28
+ r"""
29
+ Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
30
+ single processor.
31
+
32
+ [`LayoutLMv3Processor`] offers all the functionalities you need to prepare data for the model.
33
+
34
+ It first uses [`LayoutLMv3ImageProcessor`] to resize and normalize document images, and optionally applies OCR to
35
+ get words and normalized bounding boxes. These are then provided to [`LayoutLMv3Tokenizer`] or
36
+ [`LayoutLMv3TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
37
+ `attention_mask`, `token_type_ids` and `bbox`. Optionally, one can provide integer `word_labels`, which are turned
38
+ into token-level `labels` for token classification tasks (such as FUNSD, CORD).
39
+
40
+ Args:
41
+ image_processor (`LayoutLMv3ImageProcessor`, *optional*):
42
+ An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input.
43
+ tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`, *optional*):
44
+ An instance of [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`]. The tokenizer is a required input.
45
+ """
46
+
47
+ attributes = ["image_processor", "tokenizer"]
48
+ image_processor_class = "LayoutLMv3ImageProcessor"
49
+ tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
50
+
51
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
52
+ feature_extractor = None
53
+ if "feature_extractor" in kwargs:
54
+ warnings.warn(
55
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
56
+ " instead.",
57
+ FutureWarning,
58
+ )
59
+ feature_extractor = kwargs.pop("feature_extractor")
60
+
61
+ image_processor = image_processor if image_processor is not None else feature_extractor
62
+ if image_processor is None:
63
+ raise ValueError("You need to specify an `image_processor`.")
64
+ if tokenizer is None:
65
+ raise ValueError("You need to specify a `tokenizer`.")
66
+
67
+ super().__init__(image_processor, tokenizer)
68
+
69
+ def __call__(
70
+ self,
71
+ images,
72
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
73
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
74
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
75
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
76
+ add_special_tokens: bool = True,
77
+ padding: Union[bool, str, PaddingStrategy] = False,
78
+ truncation: Union[bool, str, TruncationStrategy] = None,
79
+ max_length: Optional[int] = None,
80
+ stride: int = 0,
81
+ pad_to_multiple_of: Optional[int] = None,
82
+ return_token_type_ids: Optional[bool] = None,
83
+ return_attention_mask: Optional[bool] = None,
84
+ return_overflowing_tokens: bool = False,
85
+ return_special_tokens_mask: bool = False,
86
+ return_offsets_mapping: bool = False,
87
+ return_length: bool = False,
88
+ verbose: bool = True,
89
+ return_tensors: Optional[Union[str, TensorType]] = None,
90
+ **kwargs,
91
+ ) -> BatchEncoding:
92
+ """
93
+ This method first forwards the `images` argument to [`~LayoutLMv3ImageProcessor.__call__`]. In case
94
+ [`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
95
+ bounding boxes along with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output,
96
+ together with resized and normalized `pixel_values`. In case [`LayoutLMv3ImageProcessor`] was initialized with
97
+ `apply_ocr` set to `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along
98
+ with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output, together with
99
+ resized and normalized `pixel_values`.
100
+
101
+ Please refer to the docstring of the above two methods for more information.
102
+ """
103
+ # verify input
104
+ if self.image_processor.apply_ocr and (boxes is not None):
105
+ raise ValueError(
106
+ "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
107
+ )
108
+
109
+ if self.image_processor.apply_ocr and (word_labels is not None):
110
+ raise ValueError(
111
+ "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
112
+ )
113
+
114
+ # first, apply the image processor
115
+ features = self.image_processor(images=images, return_tensors=return_tensors)
116
+
117
+ # second, apply the tokenizer
118
+ if text is not None and self.image_processor.apply_ocr and text_pair is None:
119
+ if isinstance(text, str):
120
+ text = [text] # add batch dimension (as the image processor always adds a batch dimension)
121
+ text_pair = features["words"]
122
+
123
+ encoded_inputs = self.tokenizer(
124
+ text=text if text is not None else features["words"],
125
+ text_pair=text_pair if text_pair is not None else None,
126
+ boxes=boxes if boxes is not None else features["boxes"],
127
+ word_labels=word_labels,
128
+ add_special_tokens=add_special_tokens,
129
+ padding=padding,
130
+ truncation=truncation,
131
+ max_length=max_length,
132
+ stride=stride,
133
+ pad_to_multiple_of=pad_to_multiple_of,
134
+ return_token_type_ids=return_token_type_ids,
135
+ return_attention_mask=return_attention_mask,
136
+ return_overflowing_tokens=return_overflowing_tokens,
137
+ return_special_tokens_mask=return_special_tokens_mask,
138
+ return_offsets_mapping=return_offsets_mapping,
139
+ return_length=return_length,
140
+ verbose=verbose,
141
+ return_tensors=return_tensors,
142
+ **kwargs,
143
+ )
144
+
145
+ # add pixel values
146
+ images = features.pop("pixel_values")
147
+ if return_overflowing_tokens is True:
148
+ images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
149
+ encoded_inputs["pixel_values"] = images
150
+
151
+ return encoded_inputs
152
+
153
+ def get_overflowing_images(self, images, overflow_to_sample_mapping):
154
+ # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
155
+ images_with_overflow = []
156
+ for sample_idx in overflow_to_sample_mapping:
157
+ images_with_overflow.append(images[sample_idx])
158
+
159
+ if len(images_with_overflow) != len(overflow_to_sample_mapping):
160
+ raise ValueError(
161
+ "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
162
+ f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
163
+ )
164
+
165
+ return images_with_overflow
166
+
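+ # Illustration of the mapping above (hypothetical values): with
+ # `return_overflowing_tokens=True` a long page can be split into several windows; if
+ # `overflow_to_sample_mapping == [0, 0, 1]`, the method returns
+ # `[images[0], images[0], images[1]]` so that every `input_ids` row keeps its own image.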
167
+ def batch_decode(self, *args, **kwargs):
168
+ """
169
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
170
+ refer to the docstring of this method for more information.
171
+ """
172
+ return self.tokenizer.batch_decode(*args, **kwargs)
173
+
174
+ def decode(self, *args, **kwargs):
175
+ """
176
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
177
+ to the docstring of this method for more information.
178
+ """
179
+ return self.tokenizer.decode(*args, **kwargs)
180
+
181
+ @property
182
+ def model_input_names(self):
183
+ return ["input_ids", "bbox", "attention_mask", "pixel_values"]
184
+
185
+ @property
186
+ def feature_extractor_class(self):
187
+ warnings.warn(
188
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
189
+ FutureWarning,
190
+ )
191
+ return self.image_processor_class
192
+
193
+ @property
194
+ def feature_extractor(self):
195
+ warnings.warn(
196
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
197
+ FutureWarning,
198
+ )
199
+ return self.image_processor
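+
+ # A short, non-authoritative usage sketch for `LayoutLMv3Processor` with `apply_ocr=False`
+ # (the image, words and boxes below are made-up illustration values; boxes are assumed to
+ # be normalized to the 0-1000 range used by LayoutLM-style models):
+ #
+ #     from PIL import Image
+ #     from transformers import LayoutLMv3Processor
+ #
+ #     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
+ #     image = Image.new("RGB", (224, 224), "white")
+ #     words = ["Hello", "world"]
+ #     boxes = [[10, 10, 60, 30], [70, 10, 130, 30]]
+ #     encoding = processor(image, words, boxes=boxes, return_tensors="pt")
+ #     # encoding contains: input_ids, attention_mask, bbox, pixel_values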
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3.py ADDED
@@ -0,0 +1,1461 @@
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for LayoutLMv3. Same as LayoutLMv2, but RoBERTa-like BPE tokenization instead of WordPiece."""
16
+
17
+ import json
18
+ import os
19
+ from functools import lru_cache
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import regex as re
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PreTokenizedInput,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {
39
+ "vocab_file": "vocab.json",
40
+ "merges_file": "merges.txt",
41
+ }
42
+
43
+
44
+ LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING = r"""
45
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
46
+ Whether or not to encode the sequences with the special tokens relative to their model.
47
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
48
+ Activates and controls padding. Accepts the following values:
49
+
50
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
51
+ sequence is provided).
52
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
53
+ acceptable input length for the model if that argument is not provided.
54
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
55
+ lengths).
56
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
57
+ Activates and controls truncation. Accepts the following values:
58
+
59
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
60
+ to the maximum acceptable input length for the model if that argument is not provided. This will
61
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
62
+ sequences (or a batch of pairs) is provided.
63
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
64
+ maximum acceptable input length for the model if that argument is not provided. This will only
65
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
66
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
67
+ maximum acceptable input length for the model if that argument is not provided. This will only
68
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
69
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
70
+ greater than the model maximum admissible input size).
71
+ max_length (`int`, *optional*):
72
+ Controls the maximum length to use by one of the truncation/padding parameters.
73
+
74
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
75
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
76
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
77
+ stride (`int`, *optional*, defaults to 0):
78
+ If set to a number along with `max_length`, the overflowing tokens returned when
79
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
80
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
81
+ argument defines the number of overlapping tokens.
82
+ pad_to_multiple_of (`int`, *optional*):
83
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
84
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
85
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
86
+ If set, will return tensors instead of list of python integers. Acceptable values are:
87
+
88
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
89
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
90
+ - `'np'`: Return Numpy `np.ndarray` objects.
91
+ """
92
+
93
+
94
+ LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
95
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
96
+ Whether or not to encode the sequences with the special tokens relative to their model.
97
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
98
+ Activates and controls padding. Accepts the following values:
99
+
100
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
101
+ sequence is provided).
102
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
103
+ acceptable input length for the model if that argument is not provided.
104
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
105
+ lengths).
106
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
107
+ Activates and controls truncation. Accepts the following values:
108
+
109
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
110
+ to the maximum acceptable input length for the model if that argument is not provided. This will
111
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
112
+ sequences (or a batch of pairs) is provided.
113
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
114
+ maximum acceptable input length for the model if that argument is not provided. This will only
115
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
116
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
117
+ maximum acceptable input length for the model if that argument is not provided. This will only
118
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
119
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
120
+ greater than the model maximum admissible input size).
121
+ max_length (`int`, *optional*):
122
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
123
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
124
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
125
+ truncation/padding to a maximum length will be deactivated.
126
+ stride (`int`, *optional*, defaults to 0):
127
+ If set to a number along with `max_length`, the overflowing tokens returned when
128
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
129
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
130
+ argument defines the number of overlapping tokens.
131
+ pad_to_multiple_of (`int`, *optional*):
132
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
133
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
134
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
135
+ If set, will return tensors instead of list of python integers. Acceptable values are:
136
+
137
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
138
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
139
+ - `'np'`: Return Numpy `np.ndarray` objects.
140
+ """
141
+
142
+
143
+ @lru_cache()
144
+ # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
145
+ def bytes_to_unicode():
146
+ """
147
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
148
+ characters that the bpe code barfs on.
149
+
150
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
151
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
152
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
153
+ tables between utf-8 bytes and unicode strings.
154
+ """
155
+ bs = (
156
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
157
+ )
158
+ cs = bs[:]
159
+ n = 0
160
+ for b in range(2**8):
161
+ if b not in bs:
162
+ bs.append(b)
163
+ cs.append(2**8 + n)
164
+ n += 1
165
+ cs = [chr(n) for n in cs]
166
+ return dict(zip(bs, cs))
167
+
168
+
169
+ # Copied from transformers.models.roberta.tokenization_roberta.get_pairs
170
+ def get_pairs(word):
171
+ """
172
+ Return set of symbol pairs in a word.
173
+
174
+ Word is represented as tuple of symbols (symbols being variable-length strings).
175
+ """
176
+ pairs = set()
177
+ prev_char = word[0]
178
+ for char in word[1:]:
179
+ pairs.add((prev_char, char))
180
+ prev_char = char
181
+ return pairs
182
+
183
+
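+ # Small sanity-check sketch of the two helpers above (values follow directly from the code):
+ #
+ #     bytes_to_unicode()[ord("A")]  # 'A'  -- printable bytes map to themselves
+ #     bytes_to_unicode()[0]         # 'Ā'  -- non-printable bytes are shifted above 255
+ #     get_pairs(("l", "o", "w"))    # {('l', 'o'), ('o', 'w')}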
184
+ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
185
+ r"""
186
+ Construct a LayoutLMv3 tokenizer. Based on [`RobertaTokenizer`] (Byte-Pair Encoding or BPE).
187
+ [`LayoutLMv3Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to
188
+ token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token
189
+ classification).
190
+
191
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
192
+ this superclass for more information regarding those methods.
193
+
194
+ [`LayoutLMv3Tokenizer`] runs end-to-end tokenization: punctuation splitting and byte-level Byte-Pair Encoding. It also turns the
195
+ word-level bounding boxes into token-level bounding boxes.
196
+
197
+ Args:
198
+ vocab_file (`str`):
199
+ Path to the vocabulary file.
200
+ merges_file (`str`):
201
+ Path to the merges file.
202
+ errors (`str`, *optional*, defaults to `"replace"`):
203
+ Paradigm to follow when decoding bytes to UTF-8. See
204
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
205
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
206
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
207
+
208
+ <Tip>
209
+
210
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
211
+ sequence. The token used is the `cls_token`.
212
+
213
+ </Tip>
214
+
215
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
216
+ The end of sequence token.
217
+
218
+ <Tip>
219
+
220
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
221
+ The token used is the `sep_token`.
222
+
223
+ </Tip>
224
+
225
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
226
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
227
+ sequence classification or for a text and a question for question answering. It is also used as the last
228
+ token of a sequence built with special tokens.
229
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
230
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
231
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
232
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
233
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
234
+ token instead.
235
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
236
+ The token used for padding, for example when batching sequences of different lengths.
237
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
238
+ The token used for masking values. This is the token used when training this model with masked language
239
+ modeling. This is the token which the model will try to predict.
240
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
241
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
242
+ other word. (The RoBERTa tokenizer detects the beginning of a word by the preceding space.)
243
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
244
+ The bounding box to use for the special [CLS] token.
245
+ sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
246
+ The bounding box to use for the special [SEP] token.
247
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
248
+ The bounding box to use for the special [PAD] token.
249
+ pad_token_label (`int`, *optional*, defaults to -100):
250
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
251
+ CrossEntropyLoss.
252
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
253
+ Whether or not to only label the first subword, in case word labels are provided.
254
+ """
255
+
256
+ vocab_files_names = VOCAB_FILES_NAMES
257
+ model_input_names = ["input_ids", "attention_mask", "bbox"]
258
+
259
+ def __init__(
260
+ self,
261
+ vocab_file,
262
+ merges_file,
263
+ errors="replace",
264
+ bos_token="<s>",
265
+ eos_token="</s>",
266
+ sep_token="</s>",
267
+ cls_token="<s>",
268
+ unk_token="<unk>",
269
+ pad_token="<pad>",
270
+ mask_token="<mask>",
271
+ add_prefix_space=True,
272
+ cls_token_box=[0, 0, 0, 0],
273
+ sep_token_box=[0, 0, 0, 0],
274
+ pad_token_box=[0, 0, 0, 0],
275
+ pad_token_label=-100,
276
+ only_label_first_subword=True,
277
+ **kwargs,
278
+ ):
279
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
280
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
281
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
282
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
283
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
284
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
285
+
286
+ # Mask token behave like a normal word, i.e. include the space before it
287
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
288
+
289
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
290
+ self.encoder = json.load(vocab_handle)
291
+ self.decoder = {v: k for k, v in self.encoder.items()}
292
+ self.errors = errors # how to handle errors in decoding
293
+ self.byte_encoder = bytes_to_unicode()
294
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
295
+ with open(merges_file, encoding="utf-8") as merges_handle:
296
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
297
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
298
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
299
+ self.cache = {}
300
+ self.add_prefix_space = add_prefix_space
301
+
302
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
303
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
304
+
305
+ # additional properties
306
+ self.cls_token_box = cls_token_box
307
+ self.sep_token_box = sep_token_box
308
+ self.pad_token_box = pad_token_box
309
+ self.pad_token_label = pad_token_label
310
+ self.only_label_first_subword = only_label_first_subword
311
+
312
+ super().__init__(
313
+ errors=errors,
314
+ bos_token=bos_token,
315
+ eos_token=eos_token,
316
+ unk_token=unk_token,
317
+ sep_token=sep_token,
318
+ cls_token=cls_token,
319
+ pad_token=pad_token,
320
+ mask_token=mask_token,
321
+ add_prefix_space=add_prefix_space,
322
+ cls_token_box=cls_token_box,
323
+ sep_token_box=sep_token_box,
324
+ pad_token_box=pad_token_box,
325
+ pad_token_label=pad_token_label,
326
+ only_label_first_subword=only_label_first_subword,
327
+ **kwargs,
328
+ )
329
+
330
+ @property
331
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size
332
+ def vocab_size(self):
333
+ return len(self.encoder)
334
+
335
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab
336
+ def get_vocab(self):
337
+ vocab = dict(self.encoder).copy()
338
+ vocab.update(self.added_tokens_encoder)
339
+ return vocab
340
+
341
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe
342
+ def bpe(self, token):
343
+ if token in self.cache:
344
+ return self.cache[token]
345
+ word = tuple(token)
346
+ pairs = get_pairs(word)
347
+
348
+ if not pairs:
349
+ return token
350
+
351
+ while True:
352
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
353
+ if bigram not in self.bpe_ranks:
354
+ break
355
+ first, second = bigram
356
+ new_word = []
357
+ i = 0
358
+ while i < len(word):
359
+ try:
360
+ j = word.index(first, i)
361
+ except ValueError:
362
+ new_word.extend(word[i:])
363
+ break
364
+ else:
365
+ new_word.extend(word[i:j])
366
+ i = j
367
+
368
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
369
+ new_word.append(first + second)
370
+ i += 2
371
+ else:
372
+ new_word.append(word[i])
373
+ i += 1
374
+ new_word = tuple(new_word)
375
+ word = new_word
376
+ if len(word) == 1:
377
+ break
378
+ else:
379
+ pairs = get_pairs(word)
380
+ word = " ".join(word)
381
+ self.cache[token] = word
382
+ return word
383
+
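+ # Sketch of the greedy merge loop above with hypothetical ranks: given
+ # bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") merges
+ # ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low". If none of a token's
+ # pairs appear in bpe_ranks, its symbols are returned joined by spaces, e.g. "c a t".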
384
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize
385
+ def _tokenize(self, text):
386
+ """Tokenize a string."""
387
+ bpe_tokens = []
388
+ for token in re.findall(self.pat, text):
389
+ token = "".join(
390
+ self.byte_encoder[b] for b in token.encode("utf-8")
391
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
392
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
393
+ return bpe_tokens
394
+
395
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id
396
+ def _convert_token_to_id(self, token):
397
+ """Converts a token (str) in an id using the vocab."""
398
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
399
+
400
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token
401
+ def _convert_id_to_token(self, index):
402
+ """Converts an index (integer) in a token (str) using the vocab."""
403
+ return self.decoder.get(index)
404
+
405
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string
406
+ def convert_tokens_to_string(self, tokens):
407
+ """Converts a sequence of tokens (string) in a single string."""
408
+ text = "".join(tokens)
409
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
410
+ return text
411
+
412
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary
413
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
414
+ if not os.path.isdir(save_directory):
415
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
416
+ return
417
+ vocab_file = os.path.join(
418
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
419
+ )
420
+ merge_file = os.path.join(
421
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
422
+ )
423
+
424
+ with open(vocab_file, "w", encoding="utf-8") as f:
425
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
426
+
427
+ index = 0
428
+ with open(merge_file, "w", encoding="utf-8") as writer:
429
+ writer.write("#version: 0.2\n")
430
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
431
+ if index != token_index:
432
+ logger.warning(
433
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
434
+ " Please check that the tokenizer is not corrupted!"
435
+ )
436
+ index = token_index
437
+ writer.write(" ".join(bpe_tokens) + "\n")
438
+ index += 1
439
+
440
+ return vocab_file, merge_file
441
+
442
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.build_inputs_with_special_tokens
443
+ def build_inputs_with_special_tokens(
444
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
445
+ ) -> List[int]:
446
+ """
447
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
448
+ adding special tokens. A RoBERTa sequence has the following format:
449
+
450
+ - single sequence: `<s> X </s>`
451
+ - pair of sequences: `<s> A </s></s> B </s>`
452
+
453
+ Args:
454
+ token_ids_0 (`List[int]`):
455
+ List of IDs to which the special tokens will be added.
456
+ token_ids_1 (`List[int]`, *optional*):
457
+ Optional second list of IDs for sequence pairs.
458
+
459
+ Returns:
460
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
461
+ """
462
+ if token_ids_1 is None:
463
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
464
+ cls = [self.cls_token_id]
465
+ sep = [self.sep_token_id]
466
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
467
+
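# A toy illustration of the two layouts documented above, assuming RoBERTa-style special
# token ids (cls=0, sep=2); the real ids come from the loaded vocabulary.
cls_id, sep_id = 0, 2
ids_a, ids_b = [100, 200], [300]
single = [cls_id] + ids_a + [sep_id]                           # <s> A </s>
pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>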
468
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask
469
+ def get_special_tokens_mask(
470
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
471
+ ) -> List[int]:
472
+ """
473
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
474
+ special tokens using the tokenizer `prepare_for_model` method.
475
+
476
+ Args:
477
+ token_ids_0 (`List[int]`):
478
+ List of IDs.
479
+ token_ids_1 (`List[int]`, *optional*):
480
+ Optional second list of IDs for sequence pairs.
481
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
482
+ Whether or not the token list is already formatted with special tokens for the model.
483
+
484
+ Returns:
485
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
486
+ """
487
+ if already_has_special_tokens:
488
+ return super().get_special_tokens_mask(
489
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
490
+ )
491
+
492
+ if token_ids_1 is None:
493
+ return [1] + ([0] * len(token_ids_0)) + [1]
494
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
495
+
496
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences
497
+ def create_token_type_ids_from_sequences(
498
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
499
+ ) -> List[int]:
500
+ """
501
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
502
+ make use of token type ids, therefore a list of zeros is returned.
503
+
504
+ Args:
505
+ token_ids_0 (`List[int]`):
506
+ List of IDs.
507
+ token_ids_1 (`List[int]`, *optional*):
508
+ Optional second list of IDs for sequence pairs.
509
+
510
+ Returns:
511
+ `List[int]`: List of zeros.
512
+ """
513
+ sep = [self.sep_token_id]
514
+ cls = [self.cls_token_id]
515
+
516
+ if token_ids_1 is None:
517
+ return len(cls + token_ids_0 + sep) * [0]
518
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
519
+
520
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
521
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
522
+ # If the text starts with a token that should not be split, no space is added before the text in any case.
523
+ # It's necessary to match the fast tokenization
524
+ if (
525
+ (is_split_into_words or add_prefix_space)
526
+ and (len(text) > 0 and not text[0].isspace())
527
+ and sum([text.startswith(no_split_token) for no_split_token in self.added_tokens_encoder]) == 0
528
+ ):
529
+ text = " " + text
530
+ return (text, kwargs)
531
+
532
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
533
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.__call__
534
+ def __call__(
535
+ self,
536
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
537
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
538
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
539
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
540
+ add_special_tokens: bool = True,
541
+ padding: Union[bool, str, PaddingStrategy] = False,
542
+ truncation: Union[bool, str, TruncationStrategy] = None,
543
+ max_length: Optional[int] = None,
544
+ stride: int = 0,
545
+ pad_to_multiple_of: Optional[int] = None,
546
+ return_tensors: Optional[Union[str, TensorType]] = None,
547
+ return_token_type_ids: Optional[bool] = None,
548
+ return_attention_mask: Optional[bool] = None,
549
+ return_overflowing_tokens: bool = False,
550
+ return_special_tokens_mask: bool = False,
551
+ return_offsets_mapping: bool = False,
552
+ return_length: bool = False,
553
+ verbose: bool = True,
554
+ **kwargs,
555
+ ) -> BatchEncoding:
556
+ """
557
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
558
+ sequences with word-level normalized bounding boxes and optional labels.
559
+
560
+ Args:
561
+ text (`str`, `List[str]`, `List[List[str]]`):
562
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
563
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
564
+ words).
565
+ text_pair (`List[str]`, `List[List[str]]`):
566
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
567
+ (pretokenized string).
568
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
569
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
570
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
571
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
572
+ """
573
+
574
+ # Input type checking for clearer error
575
+ def _is_valid_text_input(t):
576
+ if isinstance(t, str):
577
+ # Strings are fine
578
+ return True
579
+ elif isinstance(t, (list, tuple)):
580
+ # List are fine as long as they are...
581
+ if len(t) == 0:
582
+ # ... empty
583
+ return True
584
+ elif isinstance(t[0], str):
585
+ # ... list of strings
586
+ return True
587
+ elif isinstance(t[0], (list, tuple)):
588
+ # ... list with an empty list or with a list of strings
589
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
590
+ else:
591
+ return False
592
+ else:
593
+ return False
594
+
595
+ if text_pair is not None:
596
+ # in case text + text_pair are provided, text = questions, text_pair = words
597
+ if not _is_valid_text_input(text):
598
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
599
+ if not isinstance(text_pair, (list, tuple)):
600
+ raise ValueError(
601
+ "Words must be of type `List[str]` (single pretokenized example), "
602
+ "or `List[List[str]]` (batch of pretokenized examples)."
603
+ )
604
+ else:
605
+ # in case only text is provided => must be words
606
+ if not isinstance(text, (list, tuple)):
607
+ raise ValueError(
608
+ "Words must be of type `List[str]` (single pretokenized example), "
609
+ "or `List[List[str]]` (batch of pretokenized examples)."
610
+ )
611
+
612
+ if text_pair is not None:
613
+ is_batched = isinstance(text, (list, tuple))
614
+ else:
615
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
616
+
617
+ words = text if text_pair is None else text_pair
618
+ if boxes is None:
619
+ raise ValueError("You must provide corresponding bounding boxes")
620
+ if is_batched:
621
+ if len(words) != len(boxes):
622
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
623
+ for words_example, boxes_example in zip(words, boxes):
624
+ if len(words_example) != len(boxes_example):
625
+ raise ValueError("You must provide as many words as there are bounding boxes")
626
+ else:
627
+ if len(words) != len(boxes):
628
+ raise ValueError("You must provide as many words as there are bounding boxes")
629
+
630
+ if is_batched:
631
+ if text_pair is not None and len(text) != len(text_pair):
632
+ raise ValueError(
633
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
634
+ f" {len(text_pair)}."
635
+ )
636
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
637
+ is_pair = bool(text_pair is not None)
638
+ return self.batch_encode_plus(
639
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
640
+ is_pair=is_pair,
641
+ boxes=boxes,
642
+ word_labels=word_labels,
643
+ add_special_tokens=add_special_tokens,
644
+ padding=padding,
645
+ truncation=truncation,
646
+ max_length=max_length,
647
+ stride=stride,
648
+ pad_to_multiple_of=pad_to_multiple_of,
649
+ return_tensors=return_tensors,
650
+ return_token_type_ids=return_token_type_ids,
651
+ return_attention_mask=return_attention_mask,
652
+ return_overflowing_tokens=return_overflowing_tokens,
653
+ return_special_tokens_mask=return_special_tokens_mask,
654
+ return_offsets_mapping=return_offsets_mapping,
655
+ return_length=return_length,
656
+ verbose=verbose,
657
+ **kwargs,
658
+ )
659
+ else:
660
+ return self.encode_plus(
661
+ text=text,
662
+ text_pair=text_pair,
663
+ boxes=boxes,
664
+ word_labels=word_labels,
665
+ add_special_tokens=add_special_tokens,
666
+ padding=padding,
667
+ truncation=truncation,
668
+ max_length=max_length,
669
+ stride=stride,
670
+ pad_to_multiple_of=pad_to_multiple_of,
671
+ return_tensors=return_tensors,
672
+ return_token_type_ids=return_token_type_ids,
673
+ return_attention_mask=return_attention_mask,
674
+ return_overflowing_tokens=return_overflowing_tokens,
675
+ return_special_tokens_mask=return_special_tokens_mask,
676
+ return_offsets_mapping=return_offsets_mapping,
677
+ return_length=return_length,
678
+ verbose=verbose,
679
+ **kwargs,
680
+ )
681
+
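# A usage sketch for __call__ with pre-tokenized words, 0-1000 normalized boxes and
# word-level labels; the checkpoint name and all values are illustrative assumptions.
from transformers import LayoutLMv3Tokenizer

tokenizer = LayoutLMv3Tokenizer.from_pretrained("microsoft/layoutlmv3-base")
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
encoding = tokenizer(words, boxes=boxes, word_labels=[0, 1], padding="max_length", max_length=8)
print(encoding["input_ids"], encoding["bbox"], encoding["labels"])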
682
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
683
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.batch_encode_plus
684
+ def batch_encode_plus(
685
+ self,
686
+ batch_text_or_text_pairs: Union[
687
+ List[TextInput],
688
+ List[TextInputPair],
689
+ List[PreTokenizedInput],
690
+ ],
691
+ is_pair: bool = None,
692
+ boxes: Optional[List[List[List[int]]]] = None,
693
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
694
+ add_special_tokens: bool = True,
695
+ padding: Union[bool, str, PaddingStrategy] = False,
696
+ truncation: Union[bool, str, TruncationStrategy] = None,
697
+ max_length: Optional[int] = None,
698
+ stride: int = 0,
699
+ pad_to_multiple_of: Optional[int] = None,
700
+ return_tensors: Optional[Union[str, TensorType]] = None,
701
+ return_token_type_ids: Optional[bool] = None,
702
+ return_attention_mask: Optional[bool] = None,
703
+ return_overflowing_tokens: bool = False,
704
+ return_special_tokens_mask: bool = False,
705
+ return_offsets_mapping: bool = False,
706
+ return_length: bool = False,
707
+ verbose: bool = True,
708
+ **kwargs,
709
+ ) -> BatchEncoding:
710
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
711
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
712
+ padding=padding,
713
+ truncation=truncation,
714
+ max_length=max_length,
715
+ pad_to_multiple_of=pad_to_multiple_of,
716
+ verbose=verbose,
717
+ **kwargs,
718
+ )
719
+
720
+ return self._batch_encode_plus(
721
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
722
+ is_pair=is_pair,
723
+ boxes=boxes,
724
+ word_labels=word_labels,
725
+ add_special_tokens=add_special_tokens,
726
+ padding_strategy=padding_strategy,
727
+ truncation_strategy=truncation_strategy,
728
+ max_length=max_length,
729
+ stride=stride,
730
+ pad_to_multiple_of=pad_to_multiple_of,
731
+ return_tensors=return_tensors,
732
+ return_token_type_ids=return_token_type_ids,
733
+ return_attention_mask=return_attention_mask,
734
+ return_overflowing_tokens=return_overflowing_tokens,
735
+ return_special_tokens_mask=return_special_tokens_mask,
736
+ return_offsets_mapping=return_offsets_mapping,
737
+ return_length=return_length,
738
+ verbose=verbose,
739
+ **kwargs,
740
+ )
741
+
742
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_encode_plus
743
+ def _batch_encode_plus(
744
+ self,
745
+ batch_text_or_text_pairs: Union[
746
+ List[TextInput],
747
+ List[TextInputPair],
748
+ List[PreTokenizedInput],
749
+ ],
750
+ is_pair: bool = None,
751
+ boxes: Optional[List[List[List[int]]]] = None,
752
+ word_labels: Optional[List[List[int]]] = None,
753
+ add_special_tokens: bool = True,
754
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
755
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
756
+ max_length: Optional[int] = None,
757
+ stride: int = 0,
758
+ pad_to_multiple_of: Optional[int] = None,
759
+ return_tensors: Optional[Union[str, TensorType]] = None,
760
+ return_token_type_ids: Optional[bool] = None,
761
+ return_attention_mask: Optional[bool] = None,
762
+ return_overflowing_tokens: bool = False,
763
+ return_special_tokens_mask: bool = False,
764
+ return_offsets_mapping: bool = False,
765
+ return_length: bool = False,
766
+ verbose: bool = True,
767
+ **kwargs,
768
+ ) -> BatchEncoding:
769
+ if return_offsets_mapping:
770
+ raise NotImplementedError(
771
+ "return_offset_mapping is not available when using Python tokenizers. "
772
+ "To use this feature, change your tokenizer to one deriving from "
773
+ "transformers.PreTrainedTokenizerFast."
774
+ )
775
+
776
+ batch_outputs = self._batch_prepare_for_model(
777
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
778
+ is_pair=is_pair,
779
+ boxes=boxes,
780
+ word_labels=word_labels,
781
+ add_special_tokens=add_special_tokens,
782
+ padding_strategy=padding_strategy,
783
+ truncation_strategy=truncation_strategy,
784
+ max_length=max_length,
785
+ stride=stride,
786
+ pad_to_multiple_of=pad_to_multiple_of,
787
+ return_attention_mask=return_attention_mask,
788
+ return_token_type_ids=return_token_type_ids,
789
+ return_overflowing_tokens=return_overflowing_tokens,
790
+ return_special_tokens_mask=return_special_tokens_mask,
791
+ return_length=return_length,
792
+ return_tensors=return_tensors,
793
+ verbose=verbose,
794
+ )
795
+
796
+ return BatchEncoding(batch_outputs)
797
+
798
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
799
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_prepare_for_model
800
+ def _batch_prepare_for_model(
801
+ self,
802
+ batch_text_or_text_pairs,
803
+ is_pair: bool = None,
804
+ boxes: Optional[List[List[int]]] = None,
805
+ word_labels: Optional[List[List[int]]] = None,
806
+ add_special_tokens: bool = True,
807
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
808
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
809
+ max_length: Optional[int] = None,
810
+ stride: int = 0,
811
+ pad_to_multiple_of: Optional[int] = None,
812
+ return_tensors: Optional[str] = None,
813
+ return_token_type_ids: Optional[bool] = None,
814
+ return_attention_mask: Optional[bool] = None,
815
+ return_overflowing_tokens: bool = False,
816
+ return_special_tokens_mask: bool = False,
817
+ return_length: bool = False,
818
+ verbose: bool = True,
819
+ ) -> BatchEncoding:
820
+ """
821
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
822
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
823
+ manages a moving window (with user defined stride) for overflowing tokens.
824
+
825
+ Args:
826
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
827
+ """
828
+
829
+ batch_outputs = {}
830
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
831
+ batch_text_or_text_pair, boxes_example = example
832
+ outputs = self.prepare_for_model(
833
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
834
+ batch_text_or_text_pair[1] if is_pair else None,
835
+ boxes_example,
836
+ word_labels=word_labels[idx] if word_labels is not None else None,
837
+ add_special_tokens=add_special_tokens,
838
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
839
+ truncation=truncation_strategy.value,
840
+ max_length=max_length,
841
+ stride=stride,
842
+ pad_to_multiple_of=None, # we pad in batch afterward
843
+ return_attention_mask=False, # we pad in batch afterward
844
+ return_token_type_ids=return_token_type_ids,
845
+ return_overflowing_tokens=return_overflowing_tokens,
846
+ return_special_tokens_mask=return_special_tokens_mask,
847
+ return_length=return_length,
848
+ return_tensors=None, # We convert the whole batch to tensors at the end
849
+ prepend_batch_axis=False,
850
+ verbose=verbose,
851
+ )
852
+
853
+ for key, value in outputs.items():
854
+ if key not in batch_outputs:
855
+ batch_outputs[key] = []
856
+ batch_outputs[key].append(value)
857
+
858
+ batch_outputs = self.pad(
859
+ batch_outputs,
860
+ padding=padding_strategy.value,
861
+ max_length=max_length,
862
+ pad_to_multiple_of=pad_to_multiple_of,
863
+ return_attention_mask=return_attention_mask,
864
+ )
865
+
866
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
867
+
868
+ return batch_outputs
869
+
870
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING)
871
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode
872
+ def encode(
873
+ self,
874
+ text: Union[TextInput, PreTokenizedInput],
875
+ text_pair: Optional[PreTokenizedInput] = None,
876
+ boxes: Optional[List[List[int]]] = None,
877
+ word_labels: Optional[List[int]] = None,
878
+ add_special_tokens: bool = True,
879
+ padding: Union[bool, str, PaddingStrategy] = False,
880
+ truncation: Union[bool, str, TruncationStrategy] = None,
881
+ max_length: Optional[int] = None,
882
+ stride: int = 0,
883
+ pad_to_multiple_of: Optional[int] = None,
884
+ return_tensors: Optional[Union[str, TensorType]] = None,
885
+ return_token_type_ids: Optional[bool] = None,
886
+ return_attention_mask: Optional[bool] = None,
887
+ return_overflowing_tokens: bool = False,
888
+ return_special_tokens_mask: bool = False,
889
+ return_offsets_mapping: bool = False,
890
+ return_length: bool = False,
891
+ verbose: bool = True,
892
+ **kwargs,
893
+ ) -> List[int]:
894
+ encoded_inputs = self.encode_plus(
895
+ text=text,
896
+ text_pair=text_pair,
897
+ boxes=boxes,
898
+ word_labels=word_labels,
899
+ add_special_tokens=add_special_tokens,
900
+ padding=padding,
901
+ truncation=truncation,
902
+ max_length=max_length,
903
+ stride=stride,
904
+ pad_to_multiple_of=pad_to_multiple_of,
905
+ return_tensors=return_tensors,
906
+ return_token_type_ids=return_token_type_ids,
907
+ return_attention_mask=return_attention_mask,
908
+ return_overflowing_tokens=return_overflowing_tokens,
909
+ return_special_tokens_mask=return_special_tokens_mask,
910
+ return_offsets_mapping=return_offsets_mapping,
911
+ return_length=return_length,
912
+ verbose=verbose,
913
+ **kwargs,
914
+ )
915
+
916
+ return encoded_inputs["input_ids"]
917
+
918
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
919
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode_plus
920
+ def encode_plus(
921
+ self,
922
+ text: Union[TextInput, PreTokenizedInput],
923
+ text_pair: Optional[PreTokenizedInput] = None,
924
+ boxes: Optional[List[List[int]]] = None,
925
+ word_labels: Optional[List[int]] = None,
926
+ add_special_tokens: bool = True,
927
+ padding: Union[bool, str, PaddingStrategy] = False,
928
+ truncation: Union[bool, str, TruncationStrategy] = None,
929
+ max_length: Optional[int] = None,
930
+ stride: int = 0,
931
+ pad_to_multiple_of: Optional[int] = None,
932
+ return_tensors: Optional[Union[str, TensorType]] = None,
933
+ return_token_type_ids: Optional[bool] = None,
934
+ return_attention_mask: Optional[bool] = None,
935
+ return_overflowing_tokens: bool = False,
936
+ return_special_tokens_mask: bool = False,
937
+ return_offsets_mapping: bool = False,
938
+ return_length: bool = False,
939
+ verbose: bool = True,
940
+ **kwargs,
941
+ ) -> BatchEncoding:
942
+ """
943
+ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
944
+ `__call__` should be used instead.
945
+
946
+ Args:
947
+ text (`str`, `List[str]`, `List[List[str]]`):
948
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
949
+ text_pair (`List[str]` or `List[int]`, *optional*):
950
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
951
+ list of list of strings (words of a batch of examples).
952
+ """
953
+
954
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
955
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
956
+ padding=padding,
957
+ truncation=truncation,
958
+ max_length=max_length,
959
+ pad_to_multiple_of=pad_to_multiple_of,
960
+ verbose=verbose,
961
+ **kwargs,
962
+ )
963
+
964
+ return self._encode_plus(
965
+ text=text,
966
+ boxes=boxes,
967
+ text_pair=text_pair,
968
+ word_labels=word_labels,
969
+ add_special_tokens=add_special_tokens,
970
+ padding_strategy=padding_strategy,
971
+ truncation_strategy=truncation_strategy,
972
+ max_length=max_length,
973
+ stride=stride,
974
+ pad_to_multiple_of=pad_to_multiple_of,
975
+ return_tensors=return_tensors,
976
+ return_token_type_ids=return_token_type_ids,
977
+ return_attention_mask=return_attention_mask,
978
+ return_overflowing_tokens=return_overflowing_tokens,
979
+ return_special_tokens_mask=return_special_tokens_mask,
980
+ return_offsets_mapping=return_offsets_mapping,
981
+ return_length=return_length,
982
+ verbose=verbose,
983
+ **kwargs,
984
+ )
985
+
986
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._encode_plus
987
+ def _encode_plus(
988
+ self,
989
+ text: Union[TextInput, PreTokenizedInput],
990
+ text_pair: Optional[PreTokenizedInput] = None,
991
+ boxes: Optional[List[List[int]]] = None,
992
+ word_labels: Optional[List[int]] = None,
993
+ add_special_tokens: bool = True,
994
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
995
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
996
+ max_length: Optional[int] = None,
997
+ stride: int = 0,
998
+ pad_to_multiple_of: Optional[int] = None,
999
+ return_tensors: Optional[Union[str, TensorType]] = None,
1000
+ return_token_type_ids: Optional[bool] = None,
1001
+ return_attention_mask: Optional[bool] = None,
1002
+ return_overflowing_tokens: bool = False,
1003
+ return_special_tokens_mask: bool = False,
1004
+ return_offsets_mapping: bool = False,
1005
+ return_length: bool = False,
1006
+ verbose: bool = True,
1007
+ **kwargs,
1008
+ ) -> BatchEncoding:
1009
+ if return_offsets_mapping:
1010
+ raise NotImplementedError(
1011
+ "return_offset_mapping is not available when using Python tokenizers. "
1012
+ "To use this feature, change your tokenizer to one deriving from "
1013
+ "transformers.PreTrainedTokenizerFast. "
1014
+ "More information on available tokenizers at "
1015
+ "https://github.com/huggingface/transformers/pull/2674"
1016
+ )
1017
+
1018
+ return self.prepare_for_model(
1019
+ text=text,
1020
+ text_pair=text_pair,
1021
+ boxes=boxes,
1022
+ word_labels=word_labels,
1023
+ add_special_tokens=add_special_tokens,
1024
+ padding=padding_strategy.value,
1025
+ truncation=truncation_strategy.value,
1026
+ max_length=max_length,
1027
+ stride=stride,
1028
+ pad_to_multiple_of=pad_to_multiple_of,
1029
+ return_tensors=return_tensors,
1030
+ prepend_batch_axis=True,
1031
+ return_attention_mask=return_attention_mask,
1032
+ return_token_type_ids=return_token_type_ids,
1033
+ return_overflowing_tokens=return_overflowing_tokens,
1034
+ return_special_tokens_mask=return_special_tokens_mask,
1035
+ return_length=return_length,
1036
+ verbose=verbose,
1037
+ )
1038
+
1039
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
1040
+ def prepare_for_model(
1041
+ self,
1042
+ text: Union[TextInput, PreTokenizedInput],
1043
+ text_pair: Optional[PreTokenizedInput] = None,
1044
+ boxes: Optional[List[List[int]]] = None,
1045
+ word_labels: Optional[List[int]] = None,
1046
+ add_special_tokens: bool = True,
1047
+ padding: Union[bool, str, PaddingStrategy] = False,
1048
+ truncation: Union[bool, str, TruncationStrategy] = None,
1049
+ max_length: Optional[int] = None,
1050
+ stride: int = 0,
1051
+ pad_to_multiple_of: Optional[int] = None,
1052
+ return_tensors: Optional[Union[str, TensorType]] = None,
1053
+ return_token_type_ids: Optional[bool] = None,
1054
+ return_attention_mask: Optional[bool] = None,
1055
+ return_overflowing_tokens: bool = False,
1056
+ return_special_tokens_mask: bool = False,
1057
+ return_offsets_mapping: bool = False,
1058
+ return_length: bool = False,
1059
+ verbose: bool = True,
1060
+ prepend_batch_axis: bool = False,
1061
+ **kwargs,
1062
+ ) -> BatchEncoding:
1063
+ """
1064
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
1065
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
1066
+ (with user defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
1067
+ *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
1068
+ combination of arguments will raise an error.
1069
+
1070
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
1071
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
1072
+ labeled with -100, such that they will be ignored by the loss function.
1073
+
1074
+ Args:
1075
+ text (`str`, `List[str]`, `List[List[str]]`):
1076
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
1077
+ text_pair (`List[str]` or `List[int]`, *optional*):
1078
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
1079
+ list of list of strings (words of a batch of examples).
1080
+ """
1081
+
1082
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
1083
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
1084
+ padding=padding,
1085
+ truncation=truncation,
1086
+ max_length=max_length,
1087
+ pad_to_multiple_of=pad_to_multiple_of,
1088
+ verbose=verbose,
1089
+ **kwargs,
1090
+ )
1091
+
1092
+ tokens = []
1093
+ pair_tokens = []
1094
+ token_boxes = []
1095
+ pair_token_boxes = []
1096
+ labels = []
1097
+
1098
+ if text_pair is None:
1099
+ if word_labels is None:
1100
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
1101
+ for word, box in zip(text, boxes):
1102
+ if len(word) < 1: # skip empty words
1103
+ continue
1104
+ word_tokens = self.tokenize(word)
1105
+ tokens.extend(word_tokens)
1106
+ token_boxes.extend([box] * len(word_tokens))
1107
+ else:
1108
+ # CASE 2: token classification (training)
1109
+ for word, box, label in zip(text, boxes, word_labels):
1110
+ if len(word) < 1: # skip empty words
1111
+ continue
1112
+ word_tokens = self.tokenize(word)
1113
+ tokens.extend(word_tokens)
1114
+ token_boxes.extend([box] * len(word_tokens))
1115
+ if self.only_label_first_subword:
1116
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
1117
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
1118
+ else:
1119
+ labels.extend([label] * len(word_tokens))
1120
+ else:
1121
+ # CASE 3: document visual question answering (inference)
1122
+ # text = question
1123
+ # text_pair = words
1124
+ tokens = self.tokenize(text)
1125
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))]
1126
+
1127
+ for word, box in zip(text_pair, boxes):
1128
+ if len(word) < 1: # skip empty words
1129
+ continue
1130
+ word_tokens = self.tokenize(word)
1131
+ pair_tokens.extend(word_tokens)
1132
+ pair_token_boxes.extend([box] * len(word_tokens))
1133
+
1134
+ # Create ids + pair_ids
1135
+ ids = self.convert_tokens_to_ids(tokens)
1136
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
1137
+
1138
+ if (
1139
+ return_overflowing_tokens
1140
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
1141
+ and pair_ids is not None
1142
+ ):
1143
+ raise ValueError(
1144
+ "Not possible to return overflowing tokens for pair of sequences with the "
1145
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
1146
+ "for instance `only_second` or `only_first`."
1147
+ )
1148
+
1149
+ # Compute the total size of the returned encodings
1150
+ pair = bool(pair_ids is not None)
1151
+ len_ids = len(ids)
1152
+ len_pair_ids = len(pair_ids) if pair else 0
1153
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
1154
+
1155
+ # Truncation: Handle max sequence length
1156
+ overflowing_tokens = []
1157
+ overflowing_token_boxes = []
1158
+ overflowing_labels = []
1159
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
1160
+ (
1161
+ ids,
1162
+ token_boxes,
1163
+ pair_ids,
1164
+ pair_token_boxes,
1165
+ labels,
1166
+ overflowing_tokens,
1167
+ overflowing_token_boxes,
1168
+ overflowing_labels,
1169
+ ) = self.truncate_sequences(
1170
+ ids,
1171
+ token_boxes,
1172
+ pair_ids=pair_ids,
1173
+ pair_token_boxes=pair_token_boxes,
1174
+ labels=labels,
1175
+ num_tokens_to_remove=total_len - max_length,
1176
+ truncation_strategy=truncation_strategy,
1177
+ stride=stride,
1178
+ )
1179
+
1180
+ if return_token_type_ids and not add_special_tokens:
1181
+ raise ValueError(
1182
+ "Asking to return token_type_ids while setting add_special_tokens to False "
1183
+ "results in an undefined behavior. Please set add_special_tokens to True or "
1184
+ "set return_token_type_ids to None."
1185
+ )
1186
+
1187
+ # Load from model defaults
1188
+ if return_token_type_ids is None:
1189
+ return_token_type_ids = "token_type_ids" in self.model_input_names
1190
+ if return_attention_mask is None:
1191
+ return_attention_mask = "attention_mask" in self.model_input_names
1192
+
1193
+ encoded_inputs = {}
1194
+
1195
+ if return_overflowing_tokens:
1196
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
1197
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
1198
+ encoded_inputs["overflowing_labels"] = overflowing_labels
1199
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
1200
+
1201
+ # Add special tokens
1202
+ if add_special_tokens:
1203
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
1204
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
1205
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
1206
+ if pair_token_boxes:
1207
+ pair_token_boxes = [self.sep_token_box] + pair_token_boxes + [self.sep_token_box]
1208
+ token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
1209
+ if labels:
1210
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
1211
+ else:
1212
+ sequence = ids + pair_ids if pair else ids
1213
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
1214
+ token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
1215
+
1216
+ # Build output dictionary
1217
+ encoded_inputs["input_ids"] = sequence
1218
+ encoded_inputs["bbox"] = token_boxes
1219
+ if return_token_type_ids:
1220
+ encoded_inputs["token_type_ids"] = token_type_ids
1221
+ if return_special_tokens_mask:
1222
+ if add_special_tokens:
1223
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
1224
+ else:
1225
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
1226
+
1227
+ if labels:
1228
+ encoded_inputs["labels"] = labels
1229
+
1230
+ # Check lengths
1231
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
1232
+
1233
+ # Padding
1234
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
1235
+ encoded_inputs = self.pad(
1236
+ encoded_inputs,
1237
+ max_length=max_length,
1238
+ padding=padding_strategy.value,
1239
+ pad_to_multiple_of=pad_to_multiple_of,
1240
+ return_attention_mask=return_attention_mask,
1241
+ )
1242
+
1243
+ if return_length:
1244
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
1245
+
1246
+ batch_outputs = BatchEncoding(
1247
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
1248
+ )
1249
+
1250
+ return batch_outputs
1251
+
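# A minimal sketch of the word-to-token expansion performed above: every subword keeps the
# word's box, and with only_label_first_subword=True only the first subword keeps the word
# label. The split ["inv", "oice"] is hypothetical, standing in for self.tokenize(word).
box, label, pad_token_label = [10, 10, 90, 20], 3, -100
word_tokens = ["inv", "oice"]
token_boxes = [box] * len(word_tokens)                          # [[10, 10, 90, 20], [10, 10, 90, 20]]
labels = [label] + [pad_token_label] * (len(word_tokens) - 1)   # [3, -100]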
1252
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.truncate_sequences
1253
+ def truncate_sequences(
1254
+ self,
1255
+ ids: List[int],
1256
+ token_boxes: List[List[int]],
1257
+ pair_ids: Optional[List[int]] = None,
1258
+ pair_token_boxes: Optional[List[List[int]]] = None,
1259
+ labels: Optional[List[int]] = None,
1260
+ num_tokens_to_remove: int = 0,
1261
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
1262
+ stride: int = 0,
1263
+ ) -> Tuple[List[int], List[int], List[int]]:
1264
+ """
1265
+ Truncates a sequence pair in-place following the strategy.
1266
+
1267
+ Args:
1268
+ ids (`List[int]`):
1269
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
1270
+ `convert_tokens_to_ids` methods.
1271
+ token_boxes (`List[List[int]]`):
1272
+ Bounding boxes of the first sequence.
1273
+ pair_ids (`List[int]`, *optional*):
1274
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
1275
+ and `convert_tokens_to_ids` methods.
1276
+ pair_token_boxes (`List[List[int]]`, *optional*):
1277
+ Bounding boxes of the second sequence.
1278
+ labels (`List[int]`, *optional*):
1279
+ Labels of the first sequence (for token classification tasks).
1280
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
1281
+ Number of tokens to remove using the truncation strategy.
1282
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
1283
+ The strategy to follow for truncation. Can be:
1284
+
1285
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1286
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
1287
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
1288
+ batch of pairs) is provided.
1289
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1290
+ maximum acceptable input length for the model if that argument is not provided. This will only
1291
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1292
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1293
+ maximum acceptable input length for the model if that argument is not provided. This will only
1294
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1295
+ - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
1296
+ than the model maximum admissible input size).
1297
+ stride (`int`, *optional*, defaults to 0):
1298
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1299
+ sequence returned. The value of this argument defines the number of additional tokens.
1300
+
1301
+ Returns:
1302
+ `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
1303
+ overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair
1304
+ of sequences (or a batch of pairs) is provided.
1305
+ """
1306
+ if num_tokens_to_remove <= 0:
1307
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
1308
+
1309
+ if not isinstance(truncation_strategy, TruncationStrategy):
1310
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1311
+
1312
+ overflowing_tokens = []
1313
+ overflowing_token_boxes = []
1314
+ overflowing_labels = []
1315
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
1316
+ truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
1317
+ ):
1318
+ if len(ids) > num_tokens_to_remove:
1319
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1320
+ overflowing_tokens = ids[-window_len:]
1321
+ overflowing_token_boxes = token_boxes[-window_len:]
1322
+ overflowing_labels = labels[-window_len:]
1323
+ ids = ids[:-num_tokens_to_remove]
1324
+ token_boxes = token_boxes[:-num_tokens_to_remove]
1325
+ labels = labels[:-num_tokens_to_remove]
1326
+ else:
1327
+ error_msg = (
1328
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1329
+ f"but the first sequence has a length {len(ids)}. "
1330
+ )
1331
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST:
1332
+ error_msg = (
1333
+ error_msg + "Please select another truncation strategy than "
1334
+ f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
1335
+ )
1336
+ logger.error(error_msg)
1337
+ elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1338
+ logger.warning(
1339
+ "Be aware, overflowing tokens are not returned for the setting you have chosen,"
1340
+ f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
1341
+ "truncation strategy. So the returned list will always be empty even if some "
1342
+ "tokens have been removed."
1343
+ )
1344
+ for _ in range(num_tokens_to_remove):
1345
+ if pair_ids is None or len(ids) > len(pair_ids):
1346
+ ids = ids[:-1]
1347
+ token_boxes = token_boxes[:-1]
1348
+ labels = labels[:-1]
1349
+ else:
1350
+ pair_ids = pair_ids[:-1]
1351
+ pair_token_boxes = pair_token_boxes[:-1]
1352
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1353
+ if len(pair_ids) > num_tokens_to_remove:
1354
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1355
+ overflowing_tokens = pair_ids[-window_len:]
1356
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
1357
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1358
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
1359
+ else:
1360
+ logger.error(
1361
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1362
+ f"but the second sequence has a length {len(pair_ids)}. "
1363
+ f"Please select another truncation strategy than {truncation_strategy}, "
1364
+ "for instance 'longest_first' or 'only_first'."
1365
+ )
1366
+
1367
+ return (
1368
+ ids,
1369
+ token_boxes,
1370
+ pair_ids,
1371
+ pair_token_boxes,
1372
+ labels,
1373
+ overflowing_tokens,
1374
+ overflowing_token_boxes,
1375
+ overflowing_labels,
1376
+ )
1377
+
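# A sketch of the "only_second" branch above with made-up ids: the overflow window keeps
# `stride` extra tokens from the kept part, and the pair sequence is cut from the end.
pair_ids = list(range(10))
stride, num_tokens_to_remove = 2, 4
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]        # [4, 5, 6, 7, 8, 9]
pair_ids = pair_ids[:-num_tokens_to_remove]        # [0, 1, 2, 3, 4, 5]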
1378
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._pad
1379
+ def _pad(
1380
+ self,
1381
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1382
+ max_length: Optional[int] = None,
1383
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1384
+ pad_to_multiple_of: Optional[int] = None,
1385
+ return_attention_mask: Optional[bool] = None,
1386
+ ) -> dict:
1387
+ """
1388
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1389
+
1390
+ Args:
1391
+ encoded_inputs:
1392
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1393
+ max_length: maximum length of the returned list and optionally padding length (see below).
1394
+ Will truncate by taking into account the special tokens.
1395
+ padding_strategy: PaddingStrategy to use for padding.
1396
+
1397
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
1398
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1399
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1400
+ The tokenizer padding sides are defined in self.padding_side:
1401
+
1402
+ - 'left': pads on the left of the sequences
1403
+ - 'right': pads on the right of the sequences
1404
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
1405
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1406
+ `>= 7.5` (Volta).
1407
+ return_attention_mask:
1408
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1409
+ """
1410
+ # Load from model defaults
1411
+ if return_attention_mask is None:
1412
+ return_attention_mask = "attention_mask" in self.model_input_names
1413
+
1414
+ required_input = encoded_inputs[self.model_input_names[0]]
1415
+
1416
+ if padding_strategy == PaddingStrategy.LONGEST:
1417
+ max_length = len(required_input)
1418
+
1419
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1420
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1421
+
1422
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1423
+
1424
+ # Initialize attention mask if not present.
1425
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1426
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1427
+
1428
+ if needs_to_be_padded:
1429
+ difference = max_length - len(required_input)
1430
+ if self.padding_side == "right":
1431
+ if return_attention_mask:
1432
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1433
+ if "token_type_ids" in encoded_inputs:
1434
+ encoded_inputs["token_type_ids"] = (
1435
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1436
+ )
1437
+ if "bbox" in encoded_inputs:
1438
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
1439
+ if "labels" in encoded_inputs:
1440
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1441
+ if "special_tokens_mask" in encoded_inputs:
1442
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1443
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1444
+ elif self.padding_side == "left":
1445
+ if return_attention_mask:
1446
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1447
+ if "token_type_ids" in encoded_inputs:
1448
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1449
+ "token_type_ids"
1450
+ ]
1451
+ if "bbox" in encoded_inputs:
1452
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
1453
+ if "labels" in encoded_inputs:
1454
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1455
+ if "special_tokens_mask" in encoded_inputs:
1456
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1457
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1458
+ else:
1459
+ raise ValueError("Invalid padding strategy: " + str(self.padding_side))
1460
+
1461
+ return encoded_inputs
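# A sketch of the right-side padding step above with made-up values; pad_token_id=1 (the
# RoBERTa "<pad>" id) and the [0, 0, 0, 0] pad box are assumptions taken from the defaults.
input_ids = [0, 100, 2]
max_length = 5
difference = max_length - len(input_ids)
attention_mask = [1] * len(input_ids) + [0] * difference             # [1, 1, 1, 0, 0]
bbox = [[1, 2, 3, 4]] * len(input_ids) + [[0, 0, 0, 0]] * difference
input_ids = input_ids + [1] * difference                             # [0, 100, 2, 1, 1]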
venv/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py ADDED
@@ -0,0 +1,837 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Fast tokenization class for LayoutLMv3. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
17
+ and _encode_plus, in which the Rust tokenizer is used.
18
+ """
19
+
20
+ import json
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ from tokenizers import pre_tokenizers, processors
24
+
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PaddingStrategy,
29
+ PreTokenizedInput,
30
+ TensorType,
31
+ TextInput,
32
+ TextInputPair,
33
+ TruncationStrategy,
34
+ )
35
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
36
+ from ...utils import add_end_docstrings, logging
37
+ from .tokenization_layoutlmv3 import (
38
+ LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING,
39
+ LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
40
+ LayoutLMv3Tokenizer,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
47
+
48
+
49
+ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
50
+ r"""
51
+ Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.
52
+
53
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
54
+ refer to this superclass for more information regarding those methods.
55
+
56
+ Args:
57
+ vocab_file (`str`):
58
+ Path to the vocabulary file.
59
+ merges_file (`str`):
60
+ Path to the merges file.
61
+ errors (`str`, *optional*, defaults to `"replace"`):
62
+ Paradigm to follow when decoding bytes to UTF-8. See
63
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
64
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
65
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
66
+
67
+ <Tip>
68
+
69
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
70
+ sequence. The token used is the `cls_token`.
71
+
72
+ </Tip>
73
+
74
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
75
+ The end of sequence token.
76
+
77
+ <Tip>
78
+
79
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
80
+ The token used is the `sep_token`.
81
+
82
+ </Tip>
83
+
84
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
85
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
86
+ sequence classification or for a text and a question for question answering. It is also used as the last
87
+ token of a sequence built with special tokens.
88
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
89
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
90
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
91
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
92
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
93
+ token instead.
94
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
95
+ The token used for padding, for example when batching sequences of different lengths.
96
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
97
+ The token used for masking values. This is the token used when training this model with masked language
98
+ modeling. This is the token which the model will try to predict.
99
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
100
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
101
+ other word (the RoBERTa tokenizer detects the beginning of words by the preceding space).
102
+ trim_offsets (`bool`, *optional*, defaults to `True`):
103
+ Whether the post processing step should trim offsets to avoid including whitespaces.
104
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
105
+ The bounding box to use for the special [CLS] token.
106
+ sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
107
+ The bounding box to use for the special [SEP] token.
108
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
109
+ The bounding box to use for the special [PAD] token.
110
+ pad_token_label (`int`, *optional*, defaults to -100):
111
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
112
+ CrossEntropyLoss.
113
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
114
+ Whether or not to only label the first subword, in case word labels are provided.
115
+ """
116
+
117
+ vocab_files_names = VOCAB_FILES_NAMES
118
+ model_input_names = ["input_ids", "attention_mask"]
119
+ slow_tokenizer_class = LayoutLMv3Tokenizer
120
+
121
+ def __init__(
122
+ self,
123
+ vocab_file=None,
124
+ merges_file=None,
125
+ tokenizer_file=None,
126
+ errors="replace",
127
+ bos_token="<s>",
128
+ eos_token="</s>",
129
+ sep_token="</s>",
130
+ cls_token="<s>",
131
+ unk_token="<unk>",
132
+ pad_token="<pad>",
133
+ mask_token="<mask>",
134
+ add_prefix_space=True,
135
+ trim_offsets=True,
136
+ cls_token_box=[0, 0, 0, 0],
137
+ sep_token_box=[0, 0, 0, 0],
138
+ pad_token_box=[0, 0, 0, 0],
139
+ pad_token_label=-100,
140
+ only_label_first_subword=True,
141
+ **kwargs,
142
+ ):
143
+ super().__init__(
144
+ vocab_file,
145
+ merges_file,
146
+ tokenizer_file=tokenizer_file,
147
+ errors=errors,
148
+ bos_token=bos_token,
149
+ eos_token=eos_token,
150
+ sep_token=sep_token,
151
+ cls_token=cls_token,
152
+ unk_token=unk_token,
153
+ pad_token=pad_token,
154
+ mask_token=mask_token,
155
+ add_prefix_space=add_prefix_space,
156
+ trim_offsets=trim_offsets,
157
+ cls_token_box=cls_token_box,
158
+ sep_token_box=sep_token_box,
159
+ pad_token_box=pad_token_box,
160
+ pad_token_label=pad_token_label,
161
+ only_label_first_subword=only_label_first_subword,
162
+ **kwargs,
163
+ )
164
+
165
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
166
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
167
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
168
+ pre_tok_state["add_prefix_space"] = add_prefix_space
169
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
170
+
171
+ self.add_prefix_space = add_prefix_space
172
+
173
+ tokenizer_component = "post_processor"
174
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
175
+ if tokenizer_component_instance:
176
+ state = json.loads(tokenizer_component_instance.__getstate__())
177
+
178
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
179
+ if "sep" in state:
180
+ state["sep"] = tuple(state["sep"])
181
+ if "cls" in state:
182
+ state["cls"] = tuple(state["cls"])
183
+
184
+ changes_to_apply = False
185
+
186
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
187
+ state["add_prefix_space"] = add_prefix_space
188
+ changes_to_apply = True
189
+
190
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
191
+ state["trim_offsets"] = trim_offsets
192
+ changes_to_apply = True
193
+
194
+ if changes_to_apply:
195
+ component_class = getattr(processors, state.pop("type"))
196
+ new_value = component_class(**state)
197
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
198
+
199
+ # additional properties
200
+ self.cls_token_box = cls_token_box
201
+ self.sep_token_box = sep_token_box
202
+ self.pad_token_box = pad_token_box
203
+ self.pad_token_label = pad_token_label
204
+ self.only_label_first_subword = only_label_first_subword
205
+
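# A minimal usage sketch for the fast tokenizer (the checkpoint name is an assumption). The
# box-related attributes set at the end of __init__ are exposed exactly like on the slow
# tokenizer, and words/boxes are passed the same way as in the slow __call__.
from transformers import LayoutLMv3TokenizerFast

tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
print(tokenizer.cls_token_box, tokenizer.pad_token_box, tokenizer.pad_token_label)
# [0, 0, 0, 0] [0, 0, 0, 0] -100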
206
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
207
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.__call__
208
+ def __call__(
209
+ self,
210
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
211
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
212
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
213
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
214
+ add_special_tokens: bool = True,
215
+ padding: Union[bool, str, PaddingStrategy] = False,
216
+ truncation: Union[bool, str, TruncationStrategy] = None,
217
+ max_length: Optional[int] = None,
218
+ stride: int = 0,
219
+ pad_to_multiple_of: Optional[int] = None,
220
+ return_tensors: Optional[Union[str, TensorType]] = None,
221
+ return_token_type_ids: Optional[bool] = None,
222
+ return_attention_mask: Optional[bool] = None,
223
+ return_overflowing_tokens: bool = False,
224
+ return_special_tokens_mask: bool = False,
225
+ return_offsets_mapping: bool = False,
226
+ return_length: bool = False,
227
+ verbose: bool = True,
228
+ **kwargs,
229
+ ) -> BatchEncoding:
230
+ """
231
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
232
+ sequences with word-level normalized bounding boxes and optional labels.
233
+
234
+ Args:
235
+ text (`str`, `List[str]`, `List[List[str]]`):
236
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
237
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
238
+ words).
239
+ text_pair (`List[str]`, `List[List[str]]`):
240
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
241
+ (pretokenized string).
242
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
243
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
244
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
245
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
246
+ """
247
+
248
+ # Input type checking for clearer error
249
+ def _is_valid_text_input(t):
250
+ if isinstance(t, str):
251
+ # Strings are fine
252
+ return True
253
+ elif isinstance(t, (list, tuple)):
254
+ # List are fine as long as they are...
255
+ if len(t) == 0:
256
+ # ... empty
257
+ return True
258
+ elif isinstance(t[0], str):
259
+ # ... list of strings
260
+ return True
261
+ elif isinstance(t[0], (list, tuple)):
262
+ # ... list with an empty list or with a list of strings
263
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
264
+ else:
265
+ return False
266
+ else:
267
+ return False
268
+
269
+ if text_pair is not None:
270
+ # in case text + text_pair are provided, text = questions, text_pair = words
271
+ if not _is_valid_text_input(text):
272
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
273
+ if not isinstance(text_pair, (list, tuple)):
274
+ raise ValueError(
275
+ "Words must be of type `List[str]` (single pretokenized example), "
276
+ "or `List[List[str]]` (batch of pretokenized examples)."
277
+ )
278
+ else:
279
+ # in case only text is provided => must be words
280
+ if not isinstance(text, (list, tuple)):
281
+ raise ValueError(
282
+ "Words must be of type `List[str]` (single pretokenized example), "
283
+ "or `List[List[str]]` (batch of pretokenized examples)."
284
+ )
285
+
286
+ if text_pair is not None:
287
+ is_batched = isinstance(text, (list, tuple))
288
+ else:
289
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
290
+
291
+ words = text if text_pair is None else text_pair
292
+ if boxes is None:
293
+ raise ValueError("You must provide corresponding bounding boxes")
294
+ if is_batched:
295
+ if len(words) != len(boxes):
296
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
297
+ for words_example, boxes_example in zip(words, boxes):
298
+ if len(words_example) != len(boxes_example):
299
+ raise ValueError("You must provide as many words as there are bounding boxes")
300
+ else:
301
+ if len(words) != len(boxes):
302
+ raise ValueError("You must provide as many words as there are bounding boxes")
303
+
304
+ if is_batched:
305
+ if text_pair is not None and len(text) != len(text_pair):
306
+ raise ValueError(
307
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
308
+ f" {len(text_pair)}."
309
+ )
310
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
311
+ is_pair = bool(text_pair is not None)
312
+ return self.batch_encode_plus(
313
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
314
+ is_pair=is_pair,
315
+ boxes=boxes,
316
+ word_labels=word_labels,
317
+ add_special_tokens=add_special_tokens,
318
+ padding=padding,
319
+ truncation=truncation,
320
+ max_length=max_length,
321
+ stride=stride,
322
+ pad_to_multiple_of=pad_to_multiple_of,
323
+ return_tensors=return_tensors,
324
+ return_token_type_ids=return_token_type_ids,
325
+ return_attention_mask=return_attention_mask,
326
+ return_overflowing_tokens=return_overflowing_tokens,
327
+ return_special_tokens_mask=return_special_tokens_mask,
328
+ return_offsets_mapping=return_offsets_mapping,
329
+ return_length=return_length,
330
+ verbose=verbose,
331
+ **kwargs,
332
+ )
333
+ else:
334
+ return self.encode_plus(
335
+ text=text,
336
+ text_pair=text_pair,
337
+ boxes=boxes,
338
+ word_labels=word_labels,
339
+ add_special_tokens=add_special_tokens,
340
+ padding=padding,
341
+ truncation=truncation,
342
+ max_length=max_length,
343
+ stride=stride,
344
+ pad_to_multiple_of=pad_to_multiple_of,
345
+ return_tensors=return_tensors,
346
+ return_token_type_ids=return_token_type_ids,
347
+ return_attention_mask=return_attention_mask,
348
+ return_overflowing_tokens=return_overflowing_tokens,
349
+ return_special_tokens_mask=return_special_tokens_mask,
350
+ return_offsets_mapping=return_offsets_mapping,
351
+ return_length=return_length,
352
+ verbose=verbose,
353
+ **kwargs,
354
+ )
355
+
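A minimal usage sketch of the `__call__` API documented above (the checkpoint name and the example words, boxes and labels are illustrative assumptions, not part of this diff):

from transformers import LayoutLMv3TokenizerFast

# Words with bounding boxes normalized to the 0-1000 scale, as the docstring requires.
tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
words = ["hello", "world"]
boxes = [[10, 10, 80, 30], [90, 10, 160, 30]]
encoding = tokenizer(words, boxes=boxes, word_labels=[0, 1], padding="max_length", max_length=16)
# encoding now holds input_ids, attention_mask, bbox (token-level boxes) and labels aligned to the tokens.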
356
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
357
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.batch_encode_plus
358
+ def batch_encode_plus(
359
+ self,
360
+ batch_text_or_text_pairs: Union[
361
+ List[TextInput],
362
+ List[TextInputPair],
363
+ List[PreTokenizedInput],
364
+ ],
365
+ is_pair: bool = None,
366
+ boxes: Optional[List[List[List[int]]]] = None,
367
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
368
+ add_special_tokens: bool = True,
369
+ padding: Union[bool, str, PaddingStrategy] = False,
370
+ truncation: Union[bool, str, TruncationStrategy] = None,
371
+ max_length: Optional[int] = None,
372
+ stride: int = 0,
373
+ pad_to_multiple_of: Optional[int] = None,
374
+ return_tensors: Optional[Union[str, TensorType]] = None,
375
+ return_token_type_ids: Optional[bool] = None,
376
+ return_attention_mask: Optional[bool] = None,
377
+ return_overflowing_tokens: bool = False,
378
+ return_special_tokens_mask: bool = False,
379
+ return_offsets_mapping: bool = False,
380
+ return_length: bool = False,
381
+ verbose: bool = True,
382
+ **kwargs,
383
+ ) -> BatchEncoding:
384
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
385
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
386
+ padding=padding,
387
+ truncation=truncation,
388
+ max_length=max_length,
389
+ pad_to_multiple_of=pad_to_multiple_of,
390
+ verbose=verbose,
391
+ **kwargs,
392
+ )
393
+
394
+ return self._batch_encode_plus(
395
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
396
+ is_pair=is_pair,
397
+ boxes=boxes,
398
+ word_labels=word_labels,
399
+ add_special_tokens=add_special_tokens,
400
+ padding_strategy=padding_strategy,
401
+ truncation_strategy=truncation_strategy,
402
+ max_length=max_length,
403
+ stride=stride,
404
+ pad_to_multiple_of=pad_to_multiple_of,
405
+ return_tensors=return_tensors,
406
+ return_token_type_ids=return_token_type_ids,
407
+ return_attention_mask=return_attention_mask,
408
+ return_overflowing_tokens=return_overflowing_tokens,
409
+ return_special_tokens_mask=return_special_tokens_mask,
410
+ return_offsets_mapping=return_offsets_mapping,
411
+ return_length=return_length,
412
+ verbose=verbose,
413
+ **kwargs,
414
+ )
415
+
416
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.tokenize
417
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
418
+ batched_input = [(text, pair)] if pair else [text]
419
+ encodings = self._tokenizer.encode_batch(
420
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
421
+ )
422
+
423
+ return encodings[0].tokens
424
+
425
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
426
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.encode_plus
427
+ def encode_plus(
428
+ self,
429
+ text: Union[TextInput, PreTokenizedInput],
430
+ text_pair: Optional[PreTokenizedInput] = None,
431
+ boxes: Optional[List[List[int]]] = None,
432
+ word_labels: Optional[List[int]] = None,
433
+ add_special_tokens: bool = True,
434
+ padding: Union[bool, str, PaddingStrategy] = False,
435
+ truncation: Union[bool, str, TruncationStrategy] = None,
436
+ max_length: Optional[int] = None,
437
+ stride: int = 0,
438
+ pad_to_multiple_of: Optional[int] = None,
439
+ return_tensors: Optional[Union[str, TensorType]] = None,
440
+ return_token_type_ids: Optional[bool] = None,
441
+ return_attention_mask: Optional[bool] = None,
442
+ return_overflowing_tokens: bool = False,
443
+ return_special_tokens_mask: bool = False,
444
+ return_offsets_mapping: bool = False,
445
+ return_length: bool = False,
446
+ verbose: bool = True,
447
+ **kwargs,
448
+ ) -> BatchEncoding:
449
+ """
450
+ Tokenize and prepare for the model a sequence or a pair of sequences.
451
+ .. warning:: This method is deprecated; `__call__` should be used instead.
452
+
453
+ Args:
454
+ text (`str`, `List[str]`, `List[List[str]]`):
455
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
456
+ text_pair (`List[str]` or `List[int]`, *optional*):
457
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
458
+ list of list of strings (words of a batch of examples).
459
+ """
460
+
461
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
462
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
463
+ padding=padding,
464
+ truncation=truncation,
465
+ max_length=max_length,
466
+ pad_to_multiple_of=pad_to_multiple_of,
467
+ verbose=verbose,
468
+ **kwargs,
469
+ )
470
+
471
+ return self._encode_plus(
472
+ text=text,
473
+ boxes=boxes,
474
+ text_pair=text_pair,
475
+ word_labels=word_labels,
476
+ add_special_tokens=add_special_tokens,
477
+ padding_strategy=padding_strategy,
478
+ truncation_strategy=truncation_strategy,
479
+ max_length=max_length,
480
+ stride=stride,
481
+ pad_to_multiple_of=pad_to_multiple_of,
482
+ return_tensors=return_tensors,
483
+ return_token_type_ids=return_token_type_ids,
484
+ return_attention_mask=return_attention_mask,
485
+ return_overflowing_tokens=return_overflowing_tokens,
486
+ return_special_tokens_mask=return_special_tokens_mask,
487
+ return_offsets_mapping=return_offsets_mapping,
488
+ return_length=return_length,
489
+ verbose=verbose,
490
+ **kwargs,
491
+ )
492
+
493
+ def _batch_encode_plus(
494
+ self,
495
+ batch_text_or_text_pairs: Union[
496
+ List[TextInput],
497
+ List[TextInputPair],
498
+ List[PreTokenizedInput],
499
+ ],
500
+ is_pair: bool = None,
501
+ boxes: Optional[List[List[List[int]]]] = None,
502
+ word_labels: Optional[List[List[int]]] = None,
503
+ add_special_tokens: bool = True,
504
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
505
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
506
+ max_length: Optional[int] = None,
507
+ stride: int = 0,
508
+ pad_to_multiple_of: Optional[int] = None,
509
+ return_tensors: Optional[str] = None,
510
+ return_token_type_ids: Optional[bool] = None,
511
+ return_attention_mask: Optional[bool] = None,
512
+ return_overflowing_tokens: bool = False,
513
+ return_special_tokens_mask: bool = False,
514
+ return_offsets_mapping: bool = False,
515
+ return_length: bool = False,
516
+ verbose: bool = True,
517
+ ) -> BatchEncoding:
518
+ if not isinstance(batch_text_or_text_pairs, list):
519
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
520
+
521
+ # Set the truncation and padding strategy and restore the initial configuration
522
+ self.set_truncation_and_padding(
523
+ padding_strategy=padding_strategy,
524
+ truncation_strategy=truncation_strategy,
525
+ max_length=max_length,
526
+ stride=stride,
527
+ pad_to_multiple_of=pad_to_multiple_of,
528
+ )
529
+
530
+ if is_pair:
531
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
532
+
533
+ encodings = self._tokenizer.encode_batch(
534
+ batch_text_or_text_pairs,
535
+ add_special_tokens=add_special_tokens,
536
+ is_pretokenized=True, # we set this to True as LayoutLMv3 always expects pretokenized inputs
537
+ )
538
+
539
+ # Convert encoding to dict
540
+ # `Tokens` has type: Tuple[
541
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
542
+ # List[EncodingFast]
543
+ # ]
544
+ # with nested dimensions corresponding to batch, overflows, sequence length
545
+ tokens_and_encodings = [
546
+ self._convert_encoding(
547
+ encoding=encoding,
548
+ return_token_type_ids=return_token_type_ids,
549
+ return_attention_mask=return_attention_mask,
550
+ return_overflowing_tokens=return_overflowing_tokens,
551
+ return_special_tokens_mask=return_special_tokens_mask,
552
+ return_offsets_mapping=True
553
+ if word_labels is not None
554
+ else return_offsets_mapping, # we use offsets to create the labels
555
+ return_length=return_length,
556
+ verbose=verbose,
557
+ )
558
+ for encoding in encodings
559
+ ]
560
+
561
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
562
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
563
+ # (we say ~ because the number of overflows varies with the example in the batch)
564
+ #
565
+ # To match each overflowing sample with the original sample in the batch
566
+ # we add an overflow_to_sample_mapping array (see below)
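+ # (Illustration: if a batch of three examples yields 1, 2 and 1 overflowing windows respectively,
+ # overflow_to_sample_mapping ends up as [0, 1, 1, 2].)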
567
+ sanitized_tokens = {}
568
+ for key in tokens_and_encodings[0][0].keys():
569
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
570
+ sanitized_tokens[key] = stack
571
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
572
+
573
+ # If returning overflowing tokens, we need to return a mapping
574
+ # from the batch idx to the original sample
575
+ if return_overflowing_tokens:
576
+ overflow_to_sample_mapping = []
577
+ for i, (toks, _) in enumerate(tokens_and_encodings):
578
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
579
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
580
+
581
+ for input_ids in sanitized_tokens["input_ids"]:
582
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
583
+
584
+ # create the token boxes
585
+ token_boxes = []
586
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
587
+ if return_overflowing_tokens:
588
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
589
+ else:
590
+ original_index = batch_index
591
+ token_boxes_example = []
592
+ for id, sequence_id, word_id in zip(
593
+ sanitized_tokens["input_ids"][batch_index],
594
+ sanitized_encodings[batch_index].sequence_ids,
595
+ sanitized_encodings[batch_index].word_ids,
596
+ ):
597
+ if word_id is not None:
598
+ if is_pair and sequence_id == 0:
599
+ token_boxes_example.append(self.pad_token_box)
600
+ else:
601
+ token_boxes_example.append(boxes[original_index][word_id])
602
+ else:
603
+ if id == self.cls_token_id:
604
+ token_boxes_example.append(self.cls_token_box)
605
+ elif id == self.sep_token_id:
606
+ token_boxes_example.append(self.sep_token_box)
607
+ elif id == self.pad_token_id:
608
+ token_boxes_example.append(self.pad_token_box)
609
+ else:
610
+ raise ValueError("Id not recognized")
611
+ token_boxes.append(token_boxes_example)
612
+
613
+ sanitized_tokens["bbox"] = token_boxes
614
+
615
+ # optionally, create the labels
616
+ if word_labels is not None:
617
+ labels = []
618
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
619
+ if return_overflowing_tokens:
620
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
621
+ else:
622
+ original_index = batch_index
623
+ labels_example = []
624
+ previous_token_empty = False
625
+ for id, offset, word_id in zip(
626
+ sanitized_tokens["input_ids"][batch_index],
627
+ sanitized_tokens["offset_mapping"][batch_index],
628
+ sanitized_encodings[batch_index].word_ids,
629
+ ):
630
+ if word_id is not None:
631
+ if self.only_label_first_subword:
632
+ if offset[0] == 0 and not previous_token_empty:
633
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
634
+ labels_example.append(word_labels[original_index][word_id])
635
+ else:
636
+ labels_example.append(self.pad_token_label)
637
+ if offset == (0, 0):
638
+ previous_token_empty = True
639
+ else:
640
+ previous_token_empty = False
641
+ else:
642
+ labels_example.append(word_labels[original_index][word_id])
643
+ else:
644
+ labels_example.append(self.pad_token_label)
645
+ labels.append(labels_example)
646
+
647
+ sanitized_tokens["labels"] = labels
648
+ # finally, remove offsets if the user didn't want them
649
+ if not return_offsets_mapping:
650
+ del sanitized_tokens["offset_mapping"]
651
+
652
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
653
+
654
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._encode_plus
655
+ def _encode_plus(
656
+ self,
657
+ text: Union[TextInput, PreTokenizedInput],
658
+ text_pair: Optional[PreTokenizedInput] = None,
659
+ boxes: Optional[List[List[int]]] = None,
660
+ word_labels: Optional[List[int]] = None,
661
+ add_special_tokens: bool = True,
662
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
663
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
664
+ max_length: Optional[int] = None,
665
+ stride: int = 0,
666
+ pad_to_multiple_of: Optional[int] = None,
667
+ return_tensors: Optional[bool] = None,
668
+ return_token_type_ids: Optional[bool] = None,
669
+ return_attention_mask: Optional[bool] = None,
670
+ return_overflowing_tokens: bool = False,
671
+ return_special_tokens_mask: bool = False,
672
+ return_offsets_mapping: bool = False,
673
+ return_length: bool = False,
674
+ verbose: bool = True,
675
+ **kwargs,
676
+ ) -> BatchEncoding:
677
+ # make it a batched input
678
+ # 2 options:
679
+ # 1) only text, in which case text must be a list of str
680
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
681
+ batched_input = [(text, text_pair)] if text_pair else [text]
682
+ batched_boxes = [boxes]
683
+ batched_word_labels = [word_labels] if word_labels is not None else None
684
+ batched_output = self._batch_encode_plus(
685
+ batched_input,
686
+ is_pair=bool(text_pair is not None),
687
+ boxes=batched_boxes,
688
+ word_labels=batched_word_labels,
689
+ add_special_tokens=add_special_tokens,
690
+ padding_strategy=padding_strategy,
691
+ truncation_strategy=truncation_strategy,
692
+ max_length=max_length,
693
+ stride=stride,
694
+ pad_to_multiple_of=pad_to_multiple_of,
695
+ return_tensors=return_tensors,
696
+ return_token_type_ids=return_token_type_ids,
697
+ return_attention_mask=return_attention_mask,
698
+ return_overflowing_tokens=return_overflowing_tokens,
699
+ return_special_tokens_mask=return_special_tokens_mask,
700
+ return_offsets_mapping=return_offsets_mapping,
701
+ return_length=return_length,
702
+ verbose=verbose,
703
+ **kwargs,
704
+ )
705
+
706
+ # If return_tensors is None, we can remove the leading batch axis
707
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
708
+ if return_tensors is None and not return_overflowing_tokens:
709
+ batched_output = BatchEncoding(
710
+ {
711
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
712
+ for key, value in batched_output.items()
713
+ },
714
+ batched_output.encodings,
715
+ )
716
+
717
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
718
+
719
+ return batched_output
720
+
721
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._pad
722
+ def _pad(
723
+ self,
724
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
725
+ max_length: Optional[int] = None,
726
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
727
+ pad_to_multiple_of: Optional[int] = None,
728
+ return_attention_mask: Optional[bool] = None,
729
+ ) -> dict:
730
+ """
731
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
732
+
733
+ Args:
734
+ encoded_inputs:
735
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
736
+ max_length: maximum length of the returned list and optionally padding length (see below).
737
+ Will truncate by taking into account the special tokens.
738
+ padding_strategy: PaddingStrategy to use for padding.
739
+
740
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
741
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
742
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
743
+ The tokenizer padding sides are defined in self.padding_side:
744
+
745
+ - 'left': pads on the left of the sequences
746
+ - 'right': pads on the right of the sequences
747
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
748
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
749
+ `>= 7.5` (Volta).
750
+ return_attention_mask:
751
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
752
+ """
753
+ # Load from model defaults
754
+ if return_attention_mask is None:
755
+ return_attention_mask = "attention_mask" in self.model_input_names
756
+
757
+ required_input = encoded_inputs[self.model_input_names[0]]
758
+
759
+ if padding_strategy == PaddingStrategy.LONGEST:
760
+ max_length = len(required_input)
761
+
762
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
763
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
764
+
765
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
766
+
767
+ # Initialize attention mask if not present.
768
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
769
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
770
+
771
+ if needs_to_be_padded:
772
+ difference = max_length - len(required_input)
773
+ if self.padding_side == "right":
774
+ if return_attention_mask:
775
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
776
+ if "token_type_ids" in encoded_inputs:
777
+ encoded_inputs["token_type_ids"] = (
778
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
779
+ )
780
+ if "bbox" in encoded_inputs:
781
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
782
+ if "labels" in encoded_inputs:
783
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
784
+ if "special_tokens_mask" in encoded_inputs:
785
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
786
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
787
+ elif self.padding_side == "left":
788
+ if return_attention_mask:
789
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
790
+ if "token_type_ids" in encoded_inputs:
791
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
792
+ "token_type_ids"
793
+ ]
794
+ if "bbox" in encoded_inputs:
795
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
796
+ if "labels" in encoded_inputs:
797
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
798
+ if "special_tokens_mask" in encoded_inputs:
799
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
800
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
801
+ else:
802
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
803
+
804
+ return encoded_inputs
805
+
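A hedged sketch of what this `_pad` override adds on top of standard padding (it assumes the usual defaults pad_token_box=[0, 0, 0, 0] and pad_token_label=-100, plus an assumed checkpoint name):

from transformers import LayoutLMv3TokenizerFast

tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
enc = tokenizer(["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], word_labels=[0, 1])
padded = tokenizer.pad(enc, padding="max_length", max_length=12)
# Besides input_ids and attention_mask, the override also pads "bbox" with pad_token_box and
# "labels" with pad_token_label, so every returned list has length 12.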
806
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.save_vocabulary
807
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
808
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
809
+ return tuple(files)
810
+
811
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
812
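+ # RoBERTa-style layout: <s> A </s> for a single sequence, <s> A </s> </s> B </s> for a pair.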
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
813
+ if token_ids_1 is None:
814
+ return output
815
+
816
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
817
+
818
+ def create_token_type_ids_from_sequences(
819
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
820
+ ) -> List[int]:
821
+ """
822
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does
823
+ not make use of token type ids, therefore a list of zeros is returned.
824
+ Args:
825
+ token_ids_0 (`List[int]`):
826
+ List of IDs.
827
+ token_ids_1 (`List[int]`, *optional*):
828
+ Optional second list of IDs for sequence pairs.
829
+ Returns:
830
+ `List[int]`: List of zeros.
831
+ """
832
+ sep = [self.sep_token_id]
833
+ cls = [self.cls_token_id]
834
+
835
+ if token_ids_1 is None:
836
+ return len(cls + token_ids_0 + sep) * [0]
837
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
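A quick sanity sketch of the two helpers above, using RoBERTa's usual special-token ids (bos/cls = 0, eos/sep = 2) and made-up token ids 10, 11 and 20 (checkpoint name assumed):

from transformers import LayoutLMv3TokenizerFast

tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
tokenizer.build_inputs_with_special_tokens([10, 11], [20])
# -> [0, 10, 11, 2, 2, 20, 2], i.e. <s> A </s> </s> B </s>
tokenizer.create_token_type_ids_from_sequences([10, 11], [20])
# -> [0, 0, 0, 0, 0, 0, 0]  (always zeros; RoBERTa-style models do not use token type ids)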
venv/lib/python3.10/site-packages/transformers/models/mgp_str/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all.
4
+
5
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
6
+ #
7
+ # Licensed under the Apache License, Version 2.0 (the "License");
8
+ # you may not use this file except in compliance with the License.
9
+ # You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # Unless required by applicable law or agreed to in writing, software
14
+ # distributed under the License is distributed on an "AS IS" BASIS,
15
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ # See the License for the specific language governing permissions and
17
+ # limitations under the License.
18
+ from typing import TYPE_CHECKING
19
+
20
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
25
+ "processing_mgp_str": ["MgpstrProcessor"],
26
+ "tokenization_mgp_str": ["MgpstrTokenizer"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_mgp_str"] = [
36
+ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "MgpstrModel",
38
+ "MgpstrPreTrainedModel",
39
+ "MgpstrForSceneTextRecognition",
40
+ ]
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
44
+ from .processing_mgp_str import MgpstrProcessor
45
+ from .tokenization_mgp_str import MgpstrTokenizer
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .modeling_mgp_str import (
54
+ MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
55
+ MgpstrForSceneTextRecognition,
56
+ MgpstrModel,
57
+ MgpstrPreTrainedModel,
58
+ )
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
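For context, a short sketch of what the `_LazyModule` indirection above means for users (this describes the general lazy-import contract used across the library, stated here as an assumption rather than verified against this exact commit):

import transformers

# `import transformers` does not execute modeling_mgp_str (and so does not pull in torch-heavy code);
# the attribute lookups below are what resolve the symbols through _import_structure.
config = transformers.MgpstrConfig()          # loads configuration_mgp_str on first access
tokenizer_cls = transformers.MgpstrTokenizer  # loads tokenization_mgp_str on first access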
venv/lib/python3.10/site-packages/transformers/models/mgp_str/__pycache__/processing_mgp_str.cpython-310.pyc ADDED
Binary file (8.06 kB).