applied-ai-018 committed
Commit 308a910 · verified · 1 Parent(s): 0a5e9a0

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.

Files changed (50)
  1. ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/20.input_layernorm.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step40/zero/20.input_layernorm.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/20.input_layernorm.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  7. venv/lib/python3.10/site-packages/transformers/models/camembert/__init__.py +142 -0
  8. venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert_fast.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/models/camembert/configuration_camembert.py +155 -0
  11. venv/lib/python3.10/site-packages/transformers/models/camembert/modeling_camembert.py +1571 -0
  12. venv/lib/python3.10/site-packages/transformers/models/camembert/modeling_tf_camembert.py +1793 -0
  13. venv/lib/python3.10/site-packages/transformers/models/camembert/tokenization_camembert.py +319 -0
  14. venv/lib/python3.10/site-packages/transformers/models/camembert/tokenization_camembert_fast.py +199 -0
  15. venv/lib/python3.10/site-packages/transformers/models/kosmos2/__init__.py +64 -0
  16. venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/configuration_kosmos2.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/modeling_kosmos2.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/processing_kosmos2.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/kosmos2/configuration_kosmos2.py +295 -0
  22. venv/lib/python3.10/site-packages/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py +77 -0
  23. venv/lib/python3.10/site-packages/transformers/models/kosmos2/modeling_kosmos2.py +2054 -0
  24. venv/lib/python3.10/site-packages/transformers/models/kosmos2/processing_kosmos2.py +666 -0
  25. venv/lib/python3.10/site-packages/transformers/models/mask2former/__init__.py +75 -0
  26. venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/configuration_mask2former.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/convert_mask2former_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/image_processing_mask2former.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/modeling_mask2former.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/mask2former/configuration_mask2former.py +255 -0
  32. venv/lib/python3.10/site-packages/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py +1019 -0
  33. venv/lib/python3.10/site-packages/transformers/models/mask2former/image_processing_mask2former.py +1253 -0
  34. venv/lib/python3.10/site-packages/transformers/models/mask2former/modeling_mask2former.py +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__init__.py +88 -0
  36. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/feature_extraction_mobilenet_v2.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/image_processing_mobilenet_v2.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/modeling_mobilenet_v2.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +154 -0
  43. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py +178 -0
  44. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py +33 -0
  45. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +373 -0
  46. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +862 -0
  47. venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__init__.py +90 -0
  48. venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/configuration_musicgen_melody.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/convert_musicgen_melody_transformers.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ee1423c73d1952277ccd2dd312dfa391c714947328e9b3a1ff72c8f5877965a
+size 9387
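
Each of these checkpoint tensors is stored as a Git LFS pointer rather than raw bytes: the three added lines are the pointer spec version, the SHA-256 object id, and the blob size. A minimal sketch of parsing such a pointer and checking a locally downloaded blob against it (the pointer layout is standard Git LFS; the file paths in the usage comment are hypothetical):

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    data = Path(blob_path).read_bytes()
    return len(data) == int(fields["size"]) and hashlib.sha256(data).hexdigest() == expected_oid


# e.g. verify_blob("exp_avg_sq.pt", "exp_avg_sq.download")  # hypothetical local paths
```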
ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59a92b84f3df7cdec9901482008ff9d5a5cff3c7f1a743defff331ff9d8aae7e
+size 9293
ckpts/universal/global_step40/zero/20.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bf34aac7859876f64fc91a7511e51e1b8ab4b058e55c6a7b5b39f24fd7eb531
+size 9372
ckpts/universal/global_step40/zero/20.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:525d537e347578b5ac8fbfe8b245bd99b73da242b4e7f0467a9f5d617a85b0e1
+size 9387
ckpts/universal/global_step40/zero/20.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c28c2bb3f6c22db68d6bcfc6680b168fe2c50454834c70aff6824d6eab8d98a
+size 9293
ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfd2c666dd4d3cbf62a95a565c801e960d90abd3f7efefcf817821ff0211e9d4
+size 33555612
venv/lib/python3.10/site-packages/transformers/models/camembert/__init__.py ADDED
@@ -0,0 +1,142 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_sentencepiece_available,
+    is_tf_available,
+    is_tokenizers_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig", "CamembertOnnxConfig"],
+}
+
+try:
+    if not is_sentencepiece_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_camembert"] = ["CamembertTokenizer"]
+
+try:
+    if not is_tokenizers_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_camembert_fast"] = ["CamembertTokenizerFast"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_camembert"] = [
+        "CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "CamembertForCausalLM",
+        "CamembertForMaskedLM",
+        "CamembertForMultipleChoice",
+        "CamembertForQuestionAnswering",
+        "CamembertForSequenceClassification",
+        "CamembertForTokenClassification",
+        "CamembertModel",
+        "CamembertPreTrainedModel",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_camembert"] = [
+        "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "TFCamembertForCausalLM",
+        "TFCamembertForMaskedLM",
+        "TFCamembertForMultipleChoice",
+        "TFCamembertForQuestionAnswering",
+        "TFCamembertForSequenceClassification",
+        "TFCamembertForTokenClassification",
+        "TFCamembertModel",
+        "TFCamembertPreTrainedModel",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig, CamembertOnnxConfig
+
+    try:
+        if not is_sentencepiece_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_camembert import CamembertTokenizer
+
+    try:
+        if not is_tokenizers_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_camembert_fast import CamembertTokenizerFast
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_camembert import (
+            CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            CamembertForCausalLM,
+            CamembertForMaskedLM,
+            CamembertForMultipleChoice,
+            CamembertForQuestionAnswering,
+            CamembertForSequenceClassification,
+            CamembertForTokenClassification,
+            CamembertModel,
+            CamembertPreTrainedModel,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_camembert import (
+            TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            TFCamembertForCausalLM,
+            TFCamembertForMaskedLM,
+            TFCamembertForMultipleChoice,
+            TFCamembertForQuestionAnswering,
+            TFCamembertForSequenceClassification,
+            TFCamembertForTokenClassification,
+            TFCamembertModel,
+            TFCamembertPreTrainedModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
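
The file above registers every CamemBERT symbol in `_import_structure` and then replaces the package module in `sys.modules` with a `_LazyModule`, so heavy backends (torch, TensorFlow, sentencepiece) are only imported when a symbol is first accessed. A minimal sketch of the observable behavior; the deferred-import timing is how `_LazyModule` is designed to behave, not something this diff executes:

```python
# Importing the subpackage only installs the lazy module table built above;
# modeling_camembert (and therefore torch) has not been imported yet.
import transformers.models.camembert as camembert

# First attribute access triggers the real submodule import
# (or a clear backend error if torch is unavailable).
model_cls = camembert.CamembertModel
print(model_cls.__module__)  # transformers.models.camembert.modeling_camembert
```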
venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.14 kB)
venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert_fast.cpython-310.pyc ADDED
Binary file (7.38 kB)
venv/lib/python3.10/site-packages/transformers/models/camembert/configuration_camembert.py ADDED
@@ -0,0 +1,155 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" CamemBERT configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class CamembertConfig(PretrainedConfig):
+    """
+    This is the configuration class to store the configuration of a [`CamembertModel`] or a [`TFCamembertModel`]. It is
+    used to instantiate a Camembert model according to the specified arguments, defining the model architecture.
+    Instantiating a configuration with the defaults will yield a similar configuration to that of the Camembert
+    [almanach/camembert-base](https://huggingface.co/almanach/camembert-base) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 30522):
+            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`CamembertModel`] or [`TFCamembertModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention probabilities.
+        max_position_embeddings (`int`, *optional*, defaults to 512):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        type_vocab_size (`int`, *optional*, defaults to 2):
+            The vocabulary size of the `token_type_ids` passed when calling [`CamembertModel`] or [`TFCamembertModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+        is_decoder (`bool`, *optional*, defaults to `False`):
+            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        classifier_dropout (`float`, *optional*):
+            The dropout ratio for the classification head.
+
+    Example:
+
+    ```python
+    >>> from transformers import CamembertConfig, CamembertModel
+
+    >>> # Initializing a Camembert almanach/camembert-base style configuration
+    >>> configuration = CamembertConfig()
+
+    >>> # Initializing a model (with random weights) from the almanach/camembert-base style configuration
+    >>> model = CamembertModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "camembert"
+
+    def __init__(
+        self,
+        vocab_size=30522,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.1,
+        attention_probs_dropout_prob=0.1,
+        max_position_embeddings=512,
+        type_vocab_size=2,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        pad_token_id=1,
+        bos_token_id=0,
+        eos_token_id=2,
+        position_embedding_type="absolute",
+        use_cache=True,
+        classifier_dropout=None,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.type_vocab_size = type_vocab_size
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.position_embedding_type = position_embedding_type
+        self.use_cache = use_cache
+        self.classifier_dropout = classifier_dropout
+
+
+class CamembertOnnxConfig(OnnxConfig):
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        if self.task == "multiple-choice":
+            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+        else:
+            dynamic_axis = {0: "batch", 1: "sequence"}
+        return OrderedDict(
+            [
+                ("input_ids", dynamic_axis),
+                ("attention_mask", dynamic_axis),
+            ]
+        )
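
The `inputs` property above is what the ONNX export machinery queries to name the dynamic axes of the exported graph. A minimal sketch of inspecting it directly; the `OnnxConfig(config, task=...)` constructor signature is the upstream default and is assumed here rather than shown in this diff:

```python
from transformers import CamembertConfig
from transformers.models.camembert.configuration_camembert import CamembertOnnxConfig

config = CamembertConfig()

# Default task: batch and sequence dimensions are symbolic (dynamic).
onnx_config = CamembertOnnxConfig(config, task="default")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])

# The multiple-choice task inserts an extra "choice" axis between batch and sequence.
mc_config = CamembertOnnxConfig(config, task="multiple-choice")
print(mc_config.inputs)
```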
venv/lib/python3.10/site-packages/transformers/models/camembert/modeling_camembert.py ADDED
@@ -0,0 +1,1571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019 Inria, Facebook AI Research and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch CamemBERT model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN, gelu
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ CausalLMOutputWithCrossAttentions,
31
+ MaskedLMOutput,
32
+ MultipleChoiceModelOutput,
33
+ QuestionAnsweringModelOutput,
34
+ SequenceClassifierOutput,
35
+ TokenClassifierOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
39
+ from ...utils import (
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from .configuration_camembert import CamembertConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "almanach/camembert-base"
52
+ _CONFIG_FOR_DOC = "CamembertConfig"
53
+
54
+
55
+ from ..deprecated._archive_maps import CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ CAMEMBERT_START_DOCSTRING = r"""
59
+
60
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
61
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
62
+ etc.)
63
+
64
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
65
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
66
+ and behavior.
67
+
68
+ Parameters:
69
+ config ([`CamembertConfig`]): Model configuration class with all the parameters of the
70
+ model. Initializing with a config file does not load the weights associated with the model, only the
71
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
72
+ """
73
+
74
+
75
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Camembert
76
+ class CamembertEmbeddings(nn.Module):
77
+ """
78
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
79
+ """
80
+
81
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
82
+ def __init__(self, config):
83
+ super().__init__()
84
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
85
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
86
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
87
+
88
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
89
+ # any TensorFlow checkpoint file
90
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
91
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
92
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
93
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
94
+ self.register_buffer(
95
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
96
+ )
97
+ self.register_buffer(
98
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
99
+ )
100
+
101
+ # End copy
102
+ self.padding_idx = config.pad_token_id
103
+ self.position_embeddings = nn.Embedding(
104
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
105
+ )
106
+
107
+ def forward(
108
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
109
+ ):
110
+ if position_ids is None:
111
+ if input_ids is not None:
112
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
113
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
114
+ else:
115
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
116
+
117
+ if input_ids is not None:
118
+ input_shape = input_ids.size()
119
+ else:
120
+ input_shape = inputs_embeds.size()[:-1]
121
+
122
+ seq_length = input_shape[1]
123
+
124
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
125
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
126
+ # issue #5664
127
+ if token_type_ids is None:
128
+ if hasattr(self, "token_type_ids"):
129
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
130
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
131
+ token_type_ids = buffered_token_type_ids_expanded
132
+ else:
133
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
134
+
135
+ if inputs_embeds is None:
136
+ inputs_embeds = self.word_embeddings(input_ids)
137
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
138
+
139
+ embeddings = inputs_embeds + token_type_embeddings
140
+ if self.position_embedding_type == "absolute":
141
+ position_embeddings = self.position_embeddings(position_ids)
142
+ embeddings += position_embeddings
143
+ embeddings = self.LayerNorm(embeddings)
144
+ embeddings = self.dropout(embeddings)
145
+ return embeddings
146
+
147
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
148
+ """
149
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
150
+
151
+ Args:
152
+ inputs_embeds: torch.Tensor
153
+
154
+ Returns: torch.Tensor
155
+ """
156
+ input_shape = inputs_embeds.size()[:-1]
157
+ sequence_length = input_shape[1]
158
+
159
+ position_ids = torch.arange(
160
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
161
+ )
162
+ return position_ids.unsqueeze(0).expand(input_shape)
163
+
164
+
165
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Camembert
166
+ class CamembertSelfAttention(nn.Module):
167
+ def __init__(self, config, position_embedding_type=None):
168
+ super().__init__()
169
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
170
+ raise ValueError(
171
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
172
+ f"heads ({config.num_attention_heads})"
173
+ )
174
+
175
+ self.num_attention_heads = config.num_attention_heads
176
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
177
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
178
+
179
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
180
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
181
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
182
+
183
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
184
+ self.position_embedding_type = position_embedding_type or getattr(
185
+ config, "position_embedding_type", "absolute"
186
+ )
187
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
188
+ self.max_position_embeddings = config.max_position_embeddings
189
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
190
+
191
+ self.is_decoder = config.is_decoder
192
+
193
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
194
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
195
+ x = x.view(new_x_shape)
196
+ return x.permute(0, 2, 1, 3)
197
+
198
+ def forward(
199
+ self,
200
+ hidden_states: torch.Tensor,
201
+ attention_mask: Optional[torch.FloatTensor] = None,
202
+ head_mask: Optional[torch.FloatTensor] = None,
203
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
204
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
205
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
206
+ output_attentions: Optional[bool] = False,
207
+ ) -> Tuple[torch.Tensor]:
208
+ mixed_query_layer = self.query(hidden_states)
209
+
210
+ # If this is instantiated as a cross-attention module, the keys
211
+ # and values come from an encoder; the attention mask needs to be
212
+ # such that the encoder's padding tokens are not attended to.
213
+ is_cross_attention = encoder_hidden_states is not None
214
+
215
+ if is_cross_attention and past_key_value is not None:
216
+ # reuse k,v, cross_attentions
217
+ key_layer = past_key_value[0]
218
+ value_layer = past_key_value[1]
219
+ attention_mask = encoder_attention_mask
220
+ elif is_cross_attention:
221
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
222
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
223
+ attention_mask = encoder_attention_mask
224
+ elif past_key_value is not None:
225
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
226
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
227
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
228
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
229
+ else:
230
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
231
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
232
+
233
+ query_layer = self.transpose_for_scores(mixed_query_layer)
234
+
235
+ use_cache = past_key_value is not None
236
+ if self.is_decoder:
237
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
238
+ # Further calls to cross_attention layer can then reuse all cross-attention
239
+ # key/value_states (first "if" case)
240
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
241
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
242
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
243
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
244
+ past_key_value = (key_layer, value_layer)
245
+
246
+ # Take the dot product between "query" and "key" to get the raw attention scores.
247
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
248
+
249
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
250
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
251
+ if use_cache:
252
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
253
+ -1, 1
254
+ )
255
+ else:
256
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
257
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
258
+ distance = position_ids_l - position_ids_r
259
+
260
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
261
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
262
+
263
+ if self.position_embedding_type == "relative_key":
264
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
265
+ attention_scores = attention_scores + relative_position_scores
266
+ elif self.position_embedding_type == "relative_key_query":
267
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
268
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
269
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
270
+
271
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
272
+ if attention_mask is not None:
273
+ # Apply the attention mask is (precomputed for all layers in CamembertModel forward() function)
274
+ attention_scores = attention_scores + attention_mask
275
+
276
+ # Normalize the attention scores to probabilities.
277
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
278
+
279
+ # This is actually dropping out entire tokens to attend to, which might
280
+ # seem a bit unusual, but is taken from the original Transformer paper.
281
+ attention_probs = self.dropout(attention_probs)
282
+
283
+ # Mask heads if we want to
284
+ if head_mask is not None:
285
+ attention_probs = attention_probs * head_mask
286
+
287
+ context_layer = torch.matmul(attention_probs, value_layer)
288
+
289
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
290
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
291
+ context_layer = context_layer.view(new_context_layer_shape)
292
+
293
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
294
+
295
+ if self.is_decoder:
296
+ outputs = outputs + (past_key_value,)
297
+ return outputs
298
+
299
+
300
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput with Roberta->Camembert
301
+ class CamembertSelfOutput(nn.Module):
302
+ def __init__(self, config):
303
+ super().__init__()
304
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
305
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
306
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
307
+
308
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
309
+ hidden_states = self.dense(hidden_states)
310
+ hidden_states = self.dropout(hidden_states)
311
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
312
+ return hidden_states
313
+
314
+
315
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->Camembert
316
+ class CamembertAttention(nn.Module):
317
+ def __init__(self, config, position_embedding_type=None):
318
+ super().__init__()
319
+ self.self = CamembertSelfAttention(config, position_embedding_type=position_embedding_type)
320
+ self.output = CamembertSelfOutput(config)
321
+ self.pruned_heads = set()
322
+
323
+ def prune_heads(self, heads):
324
+ if len(heads) == 0:
325
+ return
326
+ heads, index = find_pruneable_heads_and_indices(
327
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
328
+ )
329
+
330
+ # Prune linear layers
331
+ self.self.query = prune_linear_layer(self.self.query, index)
332
+ self.self.key = prune_linear_layer(self.self.key, index)
333
+ self.self.value = prune_linear_layer(self.self.value, index)
334
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
335
+
336
+ # Update hyper params and store pruned heads
337
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
338
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
339
+ self.pruned_heads = self.pruned_heads.union(heads)
340
+
341
+ def forward(
342
+ self,
343
+ hidden_states: torch.Tensor,
344
+ attention_mask: Optional[torch.FloatTensor] = None,
345
+ head_mask: Optional[torch.FloatTensor] = None,
346
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
347
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
348
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
349
+ output_attentions: Optional[bool] = False,
350
+ ) -> Tuple[torch.Tensor]:
351
+ self_outputs = self.self(
352
+ hidden_states,
353
+ attention_mask,
354
+ head_mask,
355
+ encoder_hidden_states,
356
+ encoder_attention_mask,
357
+ past_key_value,
358
+ output_attentions,
359
+ )
360
+ attention_output = self.output(self_outputs[0], hidden_states)
361
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
362
+ return outputs
363
+
364
+
365
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Roberta->Camembert
366
+ class CamembertIntermediate(nn.Module):
367
+ def __init__(self, config):
368
+ super().__init__()
369
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
370
+ if isinstance(config.hidden_act, str):
371
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
372
+ else:
373
+ self.intermediate_act_fn = config.hidden_act
374
+
375
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
376
+ hidden_states = self.dense(hidden_states)
377
+ hidden_states = self.intermediate_act_fn(hidden_states)
378
+ return hidden_states
379
+
380
+
381
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Roberta->Camembert
382
+ class CamembertOutput(nn.Module):
383
+ def __init__(self, config):
384
+ super().__init__()
385
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
386
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
387
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
388
+
389
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
390
+ hidden_states = self.dense(hidden_states)
391
+ hidden_states = self.dropout(hidden_states)
392
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
393
+ return hidden_states
394
+
395
+
396
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->Camembert
397
+ class CamembertLayer(nn.Module):
398
+ def __init__(self, config):
399
+ super().__init__()
400
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
401
+ self.seq_len_dim = 1
402
+ self.attention = CamembertAttention(config)
403
+ self.is_decoder = config.is_decoder
404
+ self.add_cross_attention = config.add_cross_attention
405
+ if self.add_cross_attention:
406
+ if not self.is_decoder:
407
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
408
+ self.crossattention = CamembertAttention(config, position_embedding_type="absolute")
409
+ self.intermediate = CamembertIntermediate(config)
410
+ self.output = CamembertOutput(config)
411
+
412
+ def forward(
413
+ self,
414
+ hidden_states: torch.Tensor,
415
+ attention_mask: Optional[torch.FloatTensor] = None,
416
+ head_mask: Optional[torch.FloatTensor] = None,
417
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
418
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
419
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
420
+ output_attentions: Optional[bool] = False,
421
+ ) -> Tuple[torch.Tensor]:
422
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
423
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
424
+ self_attention_outputs = self.attention(
425
+ hidden_states,
426
+ attention_mask,
427
+ head_mask,
428
+ output_attentions=output_attentions,
429
+ past_key_value=self_attn_past_key_value,
430
+ )
431
+ attention_output = self_attention_outputs[0]
432
+
433
+ # if decoder, the last output is tuple of self-attn cache
434
+ if self.is_decoder:
435
+ outputs = self_attention_outputs[1:-1]
436
+ present_key_value = self_attention_outputs[-1]
437
+ else:
438
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
439
+
440
+ cross_attn_present_key_value = None
441
+ if self.is_decoder and encoder_hidden_states is not None:
442
+ if not hasattr(self, "crossattention"):
443
+ raise ValueError(
444
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
445
+ " by setting `config.add_cross_attention=True`"
446
+ )
447
+
448
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
449
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
450
+ cross_attention_outputs = self.crossattention(
451
+ attention_output,
452
+ attention_mask,
453
+ head_mask,
454
+ encoder_hidden_states,
455
+ encoder_attention_mask,
456
+ cross_attn_past_key_value,
457
+ output_attentions,
458
+ )
459
+ attention_output = cross_attention_outputs[0]
460
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
461
+
462
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
463
+ cross_attn_present_key_value = cross_attention_outputs[-1]
464
+ present_key_value = present_key_value + cross_attn_present_key_value
465
+
466
+ layer_output = apply_chunking_to_forward(
467
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
468
+ )
469
+ outputs = (layer_output,) + outputs
470
+
471
+ # if decoder, return the attn key/values as the last output
472
+ if self.is_decoder:
473
+ outputs = outputs + (present_key_value,)
474
+
475
+ return outputs
476
+
477
+ def feed_forward_chunk(self, attention_output):
478
+ intermediate_output = self.intermediate(attention_output)
479
+ layer_output = self.output(intermediate_output, attention_output)
480
+ return layer_output
481
+
482
+
483
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->Camembert
484
+ class CamembertEncoder(nn.Module):
485
+ def __init__(self, config):
486
+ super().__init__()
487
+ self.config = config
488
+ self.layer = nn.ModuleList([CamembertLayer(config) for _ in range(config.num_hidden_layers)])
489
+ self.gradient_checkpointing = False
490
+
491
+ def forward(
492
+ self,
493
+ hidden_states: torch.Tensor,
494
+ attention_mask: Optional[torch.FloatTensor] = None,
495
+ head_mask: Optional[torch.FloatTensor] = None,
496
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
497
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
498
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
499
+ use_cache: Optional[bool] = None,
500
+ output_attentions: Optional[bool] = False,
501
+ output_hidden_states: Optional[bool] = False,
502
+ return_dict: Optional[bool] = True,
503
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
504
+ all_hidden_states = () if output_hidden_states else None
505
+ all_self_attentions = () if output_attentions else None
506
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
507
+
508
+ if self.gradient_checkpointing and self.training:
509
+ if use_cache:
510
+ logger.warning_once(
511
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
512
+ )
513
+ use_cache = False
514
+
515
+ next_decoder_cache = () if use_cache else None
516
+ for i, layer_module in enumerate(self.layer):
517
+ if output_hidden_states:
518
+ all_hidden_states = all_hidden_states + (hidden_states,)
519
+
520
+ layer_head_mask = head_mask[i] if head_mask is not None else None
521
+ past_key_value = past_key_values[i] if past_key_values is not None else None
522
+
523
+ if self.gradient_checkpointing and self.training:
524
+ layer_outputs = self._gradient_checkpointing_func(
525
+ layer_module.__call__,
526
+ hidden_states,
527
+ attention_mask,
528
+ layer_head_mask,
529
+ encoder_hidden_states,
530
+ encoder_attention_mask,
531
+ past_key_value,
532
+ output_attentions,
533
+ )
534
+ else:
535
+ layer_outputs = layer_module(
536
+ hidden_states,
537
+ attention_mask,
538
+ layer_head_mask,
539
+ encoder_hidden_states,
540
+ encoder_attention_mask,
541
+ past_key_value,
542
+ output_attentions,
543
+ )
544
+
545
+ hidden_states = layer_outputs[0]
546
+ if use_cache:
547
+ next_decoder_cache += (layer_outputs[-1],)
548
+ if output_attentions:
549
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
550
+ if self.config.add_cross_attention:
551
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
552
+
553
+ if output_hidden_states:
554
+ all_hidden_states = all_hidden_states + (hidden_states,)
555
+
556
+ if not return_dict:
557
+ return tuple(
558
+ v
559
+ for v in [
560
+ hidden_states,
561
+ next_decoder_cache,
562
+ all_hidden_states,
563
+ all_self_attentions,
564
+ all_cross_attentions,
565
+ ]
566
+ if v is not None
567
+ )
568
+ return BaseModelOutputWithPastAndCrossAttentions(
569
+ last_hidden_state=hidden_states,
570
+ past_key_values=next_decoder_cache,
571
+ hidden_states=all_hidden_states,
572
+ attentions=all_self_attentions,
573
+ cross_attentions=all_cross_attentions,
574
+ )
575
+
576
+
577
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
578
+ class CamembertPooler(nn.Module):
579
+ def __init__(self, config):
580
+ super().__init__()
581
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
582
+ self.activation = nn.Tanh()
583
+
584
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
585
+ # We "pool" the model by simply taking the hidden state corresponding
586
+ # to the first token.
587
+ first_token_tensor = hidden_states[:, 0]
588
+ pooled_output = self.dense(first_token_tensor)
589
+ pooled_output = self.activation(pooled_output)
590
+ return pooled_output
591
+
592
+
593
+ class CamembertPreTrainedModel(PreTrainedModel):
594
+ """
595
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
596
+ models.
597
+ """
598
+
599
+ config_class = CamembertConfig
600
+ base_model_prefix = "roberta"
601
+ supports_gradient_checkpointing = True
602
+
603
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
604
+ def _init_weights(self, module):
605
+ """Initialize the weights"""
606
+ if isinstance(module, nn.Linear):
607
+ # Slightly different from the TF version which uses truncated_normal for initialization
608
+ # cf https://github.com/pytorch/pytorch/pull/5617
609
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
610
+ if module.bias is not None:
611
+ module.bias.data.zero_()
612
+ elif isinstance(module, nn.Embedding):
613
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
614
+ if module.padding_idx is not None:
615
+ module.weight.data[module.padding_idx].zero_()
616
+ elif isinstance(module, nn.LayerNorm):
617
+ module.bias.data.zero_()
618
+ module.weight.data.fill_(1.0)
619
+
620
+
621
+ CAMEMBERT_INPUTS_DOCSTRING = r"""
622
+ Args:
623
+ input_ids (`torch.LongTensor` of shape `({0})`):
624
+ Indices of input sequence tokens in the vocabulary.
625
+
626
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
627
+ [`PreTrainedTokenizer.__call__`] for details.
628
+
629
+ [What are input IDs?](../glossary#input-ids)
630
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
631
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
632
+
633
+ - 1 for tokens that are **not masked**,
634
+ - 0 for tokens that are **masked**.
635
+
636
+ [What are attention masks?](../glossary#attention-mask)
637
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
638
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
639
+ 1]`:
640
+
641
+ - 0 corresponds to a *sentence A* token,
642
+ - 1 corresponds to a *sentence B* token.
643
+
644
+ [What are token type IDs?](../glossary#token-type-ids)
645
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
646
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
647
+ config.max_position_embeddings - 1]`.
648
+
649
+ [What are position IDs?](../glossary#position-ids)
650
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
651
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
652
+
653
+ - 1 indicates the head is **not masked**,
654
+ - 0 indicates the head is **masked**.
655
+
656
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
657
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
658
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
659
+ model's internal embedding lookup matrix.
660
+ output_attentions (`bool`, *optional*):
661
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
662
+ tensors for more detail.
663
+ output_hidden_states (`bool`, *optional*):
664
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
665
+ more detail.
666
+ return_dict (`bool`, *optional*):
667
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
668
+ """
669
+
670
+
671
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Camembert
672
+ class CamembertClassificationHead(nn.Module):
673
+ """Head for sentence-level classification tasks."""
674
+
675
+ def __init__(self, config):
676
+ super().__init__()
677
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
678
+ classifier_dropout = (
679
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
680
+ )
681
+ self.dropout = nn.Dropout(classifier_dropout)
682
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
683
+
684
+ def forward(self, features, **kwargs):
685
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
686
+ x = self.dropout(x)
687
+ x = self.dense(x)
688
+ x = torch.tanh(x)
689
+ x = self.dropout(x)
690
+ x = self.out_proj(x)
691
+ return x
692
+
693
+
694
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Camembert
695
+ class CamembertLMHead(nn.Module):
696
+ """Camembert Head for masked language modeling."""
697
+
698
+ def __init__(self, config):
699
+ super().__init__()
700
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
701
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
702
+
703
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
704
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
705
+ self.decoder.bias = self.bias
706
+
707
+ def forward(self, features, **kwargs):
708
+ x = self.dense(features)
709
+ x = gelu(x)
710
+ x = self.layer_norm(x)
711
+
712
+ # project back to size of vocabulary with bias
713
+ x = self.decoder(x)
714
+
715
+ return x
716
+
717
+ def _tie_weights(self):
718
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
719
+ # For accelerate compatibility and to not break backward compatibility
720
+ if self.decoder.bias.device.type == "meta":
721
+ self.decoder.bias = self.bias
722
+ else:
723
+ self.bias = self.decoder.bias
724
+
725
+
726
+ @add_start_docstrings(
727
+ "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
728
+ CAMEMBERT_START_DOCSTRING,
729
+ )
730
+ class CamembertModel(CamembertPreTrainedModel):
731
+ """
732
+
733
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
734
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
735
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
736
+ Kaiser and Illia Polosukhin.
737
+
738
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to
739
+ `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
740
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
741
+
742
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
743
+
744
+ """
745
+
+    _no_split_modules = []
+
+    # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Camembert
+    def __init__(self, config, add_pooling_layer=True):
+        super().__init__(config)
+        self.config = config
+
+        self.embeddings = CamembertEmbeddings(config)
+        self.encoder = CamembertEncoder(config)
+
+        self.pooler = CamembertPooler(config) if add_pooling_layer else None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embeddings.word_embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings.word_embeddings = value
+
+    def _prune_heads(self, heads_to_prune):
+        """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
+        base class PreTrainedModel.
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.layer[layer].attention.prune_heads(heads)
+
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    # Copied from transformers.models.bert.modeling_bert.BertModel.forward
+    def forward(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        token_type_ids: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        encoder_hidden_states: Optional[torch.Tensor] = None,
+        encoder_attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+        r"""
+        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        """
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        if self.config.is_decoder:
+            use_cache = use_cache if use_cache is not None else self.config.use_cache
+        else:
+            use_cache = False
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+            input_shape = input_ids.size()
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        batch_size, seq_length = input_shape
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+        # past_key_values_length
+        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+        if attention_mask is None:
+            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+        if token_type_ids is None:
+            if hasattr(self.embeddings, "token_type_ids"):
+                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+                token_type_ids = buffered_token_type_ids_expanded
+            else:
+                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to make it broadcastable to all heads.
+        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+        # If a 2D or 3D attention mask is provided for the cross-attention,
+        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+        if self.config.is_decoder and encoder_hidden_states is not None:
+            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+            if encoder_attention_mask is None:
+                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+        else:
+            encoder_extended_attention_mask = None
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+        embedding_output = self.embeddings(
+            input_ids=input_ids,
+            position_ids=position_ids,
+            token_type_ids=token_type_ids,
+            inputs_embeds=inputs_embeds,
+            past_key_values_length=past_key_values_length,
+        )
+        encoder_outputs = self.encoder(
+            embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_extended_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = encoder_outputs[0]
+        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+        if not return_dict:
+            return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+        return BaseModelOutputWithPoolingAndCrossAttentions(
+            last_hidden_state=sequence_output,
+            pooler_output=pooled_output,
+            past_key_values=encoder_outputs.past_key_values,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+            cross_attentions=encoder_outputs.cross_attentions,
+        )
+
+
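A minimal usage sketch of the bare model above, assuming the `almanach/camembert-base` checkpoint referenced by `_CHECKPOINT_FOR_DOC` is available; the point here is only the output shapes:

```python
import torch
from transformers import AutoTokenizer, CamembertModel

tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
model = CamembertModel.from_pretrained("almanach/camembert-base")

inputs = tokenizer("J'aime le camembert !", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (batch_size, hidden_size), tanh-pooled <s> token
```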
913
+@add_start_docstrings(
+    """CamemBERT Model with a `language modeling` head on top.""",
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT
+class CamembertForMaskedLM(CamembertPreTrainedModel):
+    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        if config.is_decoder:
+            logger.warning(
+                "If you want to use `CamembertForMaskedLM` make sure `config.is_decoder=False` for "
+                "bi-directional self-attention."
+            )
+
+        self.roberta = CamembertModel(config, add_pooling_layer=False)
+        self.lm_head = CamembertLMHead(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_output_embeddings(self):
+        return self.lm_head.decoder
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head.decoder = new_embeddings
+
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=MaskedLMOutput,
+        config_class=_CONFIG_FOR_DOC,
+        mask="<mask>",
+        expected_output="' Paris'",
+        expected_loss=0.1,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        encoder_attention_mask: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+            Used to hide legacy arguments that have been deprecated.
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = outputs[0]
+        prediction_scores = self.lm_head(sequence_output)
+
+        masked_lm_loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(prediction_scores.device)
+            loss_fct = CrossEntropyLoss()
+            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+        return MaskedLMOutput(
+            loss=masked_lm_loss,
+            logits=prediction_scores,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
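A minimal fill-mask sketch for the class above; the `<mask>` handling mirrors the `add_code_sample_docstrings` values, and the exact top prediction should be treated as indicative rather than guaranteed:

```python
import torch
from transformers import AutoTokenizer, CamembertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
model = CamembertForMaskedLM.from_pretrained("almanach/camembert-base")

inputs = tokenizer("La capitale de la France est <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# locate the <mask> position, then take its most likely replacement
mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(axis=-1)
print(tokenizer.decode(predicted_id))
```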
1011
+@add_start_docstrings(
+    """
+    CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+    pooled output) e.g. for GLUE tasks.
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification with Roberta->Camembert, ROBERTA->CAMEMBERT
+class CamembertForSequenceClassification(CamembertPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.config = config
+
+        self.roberta = CamembertModel(config, add_pooling_layer=False)
+        self.classifier = CamembertClassificationHead(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint="cardiffnlp/twitter-roberta-base-emotion",
+        output_type=SequenceClassifierOutput,
+        config_class=_CONFIG_FOR_DOC,
+        expected_output="'optimism'",
+        expected_loss=0.08,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = outputs[0]
+        logits = self.classifier(sequence_output)
+
+        loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(logits.device)
+            if self.config.problem_type is None:
+                if self.num_labels == 1:
+                    self.config.problem_type = "regression"
+                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                    self.config.problem_type = "single_label_classification"
+                else:
+                    self.config.problem_type = "multi_label_classification"
+
+            if self.config.problem_type == "regression":
+                loss_fct = MSELoss()
+                if self.num_labels == 1:
+                    loss = loss_fct(logits.squeeze(), labels.squeeze())
+                else:
+                    loss = loss_fct(logits, labels)
+            elif self.config.problem_type == "single_label_classification":
+                loss_fct = CrossEntropyLoss()
+                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+            elif self.config.problem_type == "multi_label_classification":
+                loss_fct = BCEWithLogitsLoss()
+                loss = loss_fct(logits, labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
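A small sketch of the `problem_type` dispatch above, using a randomly initialized head so no checkpoint is needed; the `num_labels=3` configuration and the token ids are hypothetical:

```python
import torch
from transformers import CamembertConfig, CamembertForSequenceClassification

config = CamembertConfig(num_labels=3)  # hypothetical 3-way classifier
model = CamembertForSequenceClassification(config)

input_ids = torch.randint(5, 100, (2, 12))  # avoid special-token ids
labels = torch.tensor([0, 2])  # integer labels select "single_label_classification"
outputs = model(input_ids=input_ids, labels=labels)
print(config.problem_type, outputs.loss)

# float labels of shape (batch_size, num_labels) would select BCEWithLogitsLoss instead,
# and num_labels == 1 with float targets falls into the "regression" branch
```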
1111
+@add_start_docstrings(
+    """
+    CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+    softmax) e.g. for RocStories/SWAG tasks.
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForMultipleChoice with Roberta->Camembert, ROBERTA->CAMEMBERT
+class CamembertForMultipleChoice(CamembertPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.roberta = CamembertModel(config)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+        self.classifier = nn.Linear(config.hidden_size, 1)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(
+        CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+    )
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=MultipleChoiceModelOutput,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+            `input_ids` above)
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+        flat_inputs_embeds = (
+            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+            if inputs_embeds is not None
+            else None
+        )
+
+        outputs = self.roberta(
+            flat_input_ids,
+            position_ids=flat_position_ids,
+            token_type_ids=flat_token_type_ids,
+            attention_mask=flat_attention_mask,
+            head_mask=head_mask,
+            inputs_embeds=flat_inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        pooled_output = outputs[1]
+
+        pooled_output = self.dropout(pooled_output)
+        logits = self.classifier(pooled_output)
+        reshaped_logits = logits.view(-1, num_choices)
+
+        loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(reshaped_logits.device)
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(reshaped_logits, labels)
+
+        if not return_dict:
+            output = (reshaped_logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return MultipleChoiceModelOutput(
+            loss=loss,
+            logits=reshaped_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
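The multiple-choice head expects a `(batch_size, num_choices, sequence_length)` layout, which the forward above flattens to `(batch_size * num_choices, sequence_length)` before the encoder. A tokenization sketch (the prompt and choice strings are made up):

```python
from transformers import AutoTokenizer, CamembertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
model = CamembertForMultipleChoice.from_pretrained("almanach/camembert-base")

prompt = "Il pleut, donc je prends"  # made-up example
choices = ["un parapluie.", "des lunettes de soleil."]

# encode the prompt against each choice, then add the batch dimension
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # (1, num_choices, seq_len)

logits = model(**inputs).logits  # (1, num_choices)
print(logits.argmax(-1))
```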
1206
+@add_start_docstrings(
+    """
+    CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+    for Named-Entity-Recognition (NER) tasks.
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification with Roberta->Camembert, ROBERTA->CAMEMBERT
+class CamembertForTokenClassification(CamembertPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+
+        self.roberta = CamembertModel(config, add_pooling_layer=False)
+        classifier_dropout = (
+            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+        )
+        self.dropout = nn.Dropout(classifier_dropout)
+        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint="Jean-Baptiste/roberta-large-ner-english",
+        output_type=TokenClassifierOutput,
+        config_class=_CONFIG_FOR_DOC,
+        expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
+        expected_loss=0.01,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+
+        sequence_output = self.dropout(sequence_output)
+        logits = self.classifier(sequence_output)
+
+        loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(logits.device)
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TokenClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
1292
+@add_start_docstrings(
+    """
+    CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering with Roberta->Camembert, ROBERTA->CAMEMBERT
+class CamembertForQuestionAnswering(CamembertPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+
+        self.roberta = CamembertModel(config, add_pooling_layer=False)
+        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint="deepset/roberta-base-squad2",
+        output_type=QuestionAnsweringModelOutput,
+        config_class=_CONFIG_FOR_DOC,
+        expected_output="' puppet'",
+        expected_loss=0.86,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        start_positions: Optional[torch.LongTensor] = None,
+        end_positions: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+        r"""
+        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+
+        logits = self.qa_outputs(sequence_output)
+        start_logits, end_logits = logits.split(1, dim=-1)
+        start_logits = start_logits.squeeze(-1).contiguous()
+        end_logits = end_logits.squeeze(-1).contiguous()
+
+        total_loss = None
+        if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, the split adds a dimension; squeeze it
+            if len(start_positions.size()) > 1:
+                start_positions = start_positions.squeeze(-1)
+            if len(end_positions.size()) > 1:
+                end_positions = end_positions.squeeze(-1)
+            # sometimes the start/end positions are outside our model inputs; we ignore these terms
+            ignored_index = start_logits.size(1)
+            start_positions = start_positions.clamp(0, ignored_index)
+            end_positions = end_positions.clamp(0, ignored_index)
+
+            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+            start_loss = loss_fct(start_logits, start_positions)
+            end_loss = loss_fct(end_logits, end_positions)
+            total_loss = (start_loss + end_loss) / 2
+
+        if not return_dict:
+            output = (start_logits, end_logits) + outputs[2:]
+            return ((total_loss,) + output) if total_loss is not None else output
+
+        return QuestionAnsweringModelOutput(
+            loss=total_loss,
+            start_logits=start_logits,
+            end_logits=end_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
1394
+@add_start_docstrings(
+    """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", CAMEMBERT_START_DOCSTRING
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with Roberta->Camembert, ROBERTA->CAMEMBERT, FacebookAI/roberta-base->almanach/camembert-base
+class CamembertForCausalLM(CamembertPreTrainedModel):
+    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        if not config.is_decoder:
+            logger.warning("If you want to use `CamembertForCausalLM` as a standalone, add `is_decoder=True`.")
+
+        self.roberta = CamembertModel(config, add_pooling_layer=False)
+        self.lm_head = CamembertLMHead(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_output_embeddings(self):
+        return self.lm_head.decoder
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head.decoder = new_embeddings
+
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        encoder_attention_mask: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+        r"""
+        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, CamembertForCausalLM, AutoConfig
+        >>> import torch
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
+        >>> config = AutoConfig.from_pretrained("almanach/camembert-base")
+        >>> config.is_decoder = True
+        >>> model = CamembertForCausalLM.from_pretrained("almanach/camembert-base", config=config)
+
+        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+        >>> outputs = model(**inputs)
+
+        >>> prediction_logits = outputs.logits
+        ```"""
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        if labels is not None:
+            use_cache = False
+
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+        prediction_scores = self.lm_head(sequence_output)
+
+        lm_loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(prediction_scores.device)
+            # we are doing next-token prediction; shift prediction scores and input ids by one
+            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+            labels = labels[:, 1:].contiguous()
+            loss_fct = CrossEntropyLoss()
+            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return ((lm_loss,) + output) if lm_loss is not None else output
+
+        return CausalLMOutputWithCrossAttentions(
+            loss=lm_loss,
+            logits=prediction_scores,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+            cross_attentions=outputs.cross_attentions,
+        )
+
+    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
+        input_shape = input_ids.shape
+        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+        if attention_mask is None:
+            attention_mask = input_ids.new_ones(input_shape)
+
+        # cut decoder_input_ids if past_key_values is used
+        if past_key_values is not None:
+            past_length = past_key_values[0][0].shape[2]
+
+            # Some generation methods already pass only the last input ID
+            if input_ids.shape[1] > past_length:
+                remove_prefix_length = past_length
+            else:
+                # Default to old behavior: keep only the final ID
+                remove_prefix_length = input_ids.shape[1] - 1
+
+            input_ids = input_ids[:, remove_prefix_length:]
+
+        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
+
+    def _reorder_cache(self, past_key_values, beam_idx):
+        reordered_past = ()
+        for layer_past in past_key_values:
+            reordered_past += (
+                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+            )
+        return reordered_past
+
+
1557
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+    """
+    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+    are ignored. This is modified from fairseq's `utils.make_positions`.
+
+    Args:
+        input_ids: torch.Tensor
+        padding_idx: int
+        past_key_values_length: int
+
+    Returns: torch.Tensor
+    """
+    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+    mask = input_ids.ne(padding_idx).int()
+    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+    return incremental_indices.long() + padding_idx
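A tiny worked example of the helper above, showing how padding positions stay at `padding_idx` while real tokens count up from `padding_idx + 1` (the token ids are made up):

```python
import torch

input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # 1 is the padding idx
padding_idx = 1

mask = input_ids.ne(padding_idx).int()                        # [[1, 1, 1, 0, 0]]
incremental = torch.cumsum(mask, dim=1).type_as(mask) * mask  # [[1, 2, 3, 0, 0]]
print(incremental.long() + padding_idx)                       # tensor([[2, 3, 4, 1, 1]])
```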
venv/lib/python3.10/site-packages/transformers/models/camembert/modeling_tf_camembert.py ADDED
@@ -0,0 +1,1793 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 CamemBERT model."""
+
+
+from __future__ import annotations
+
+import math
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+    TFBaseModelOutputWithPastAndCrossAttentions,
+    TFBaseModelOutputWithPoolingAndCrossAttentions,
+    TFCausalLMOutputWithCrossAttentions,
+    TFMaskedLMOutput,
+    TFMultipleChoiceModelOutput,
+    TFQuestionAnsweringModelOutput,
+    TFSequenceClassifierOutput,
+    TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+    TFCausalLanguageModelingLoss,
+    TFMaskedLanguageModelingLoss,
+    TFModelInputType,
+    TFMultipleChoiceLoss,
+    TFPreTrainedModel,
+    TFQuestionAnsweringLoss,
+    TFSequenceClassificationLoss,
+    TFTokenClassificationLoss,
+    get_initializer,
+    keras,
+    keras_serializable,
+    unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+    add_code_sample_docstrings,
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    logging,
+)
+from .configuration_camembert import CamembertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "almanach/camembert-base"
+_CONFIG_FOR_DOC = "CamembertConfig"
+
+
+from ..deprecated._archive_maps import TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402
+
+
+CAMEMBERT_START_DOCSTRING = r"""
+
+    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads etc.)
+
+    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
+    and behavior.
+
+    <Tip>
+
+    TensorFlow models and layers in `transformers` accept two formats as input:
+
+    - having all inputs as keyword arguments (like PyTorch models), or
+    - having all inputs as a list, tuple or dict in the first positional argument.
+
+    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+    positional argument:
+
+    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+    Note that when creating models and layers with
+    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+    about any of this, as you can just pass inputs like you would to any other Python function!
+
+    </Tip>
+
+    Parameters:
+        config ([`CamembertConfig`]): Model configuration class with all the parameters of the
+            model. Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
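The three Keras input formats described in the docstring above look like this in practice; a sketch, assuming TF weights are available for the `almanach/camembert-base` checkpoint (otherwise `from_pretrained(..., from_pt=True)` can convert the PyTorch weights):

```python
from transformers import AutoTokenizer, TFCamembertModel

tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
model = TFCamembertModel.from_pretrained("almanach/camembert-base")

enc = tokenizer("J'aime le camembert !", return_tensors="tf")

out1 = model(enc["input_ids"])                           # single tensor
out2 = model([enc["input_ids"], enc["attention_mask"]])  # list, docstring order
out3 = model({"input_ids": enc["input_ids"],
              "attention_mask": enc["attention_mask"]})  # dict keyed by input name
```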
114
+CAMEMBERT_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+            [`PreTrainedTokenizer.encode`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+            1]`:
+
+            - 0 corresponds to a *sentence A* token,
+            - 1 corresponds to a *sentence B* token.
+
+            [What are token type IDs?](../glossary#token-type-ids)
+        position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+            config.max_position_embeddings - 1]`.
+
+            [What are position IDs?](../glossary#position-ids)
+        head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
+            config will be used instead.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
+            used instead.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+            eager mode; in graph mode the value will always be set to `True`.
+        training (`bool`, *optional*, defaults to `False`):
+            Whether or not to use the model in training mode (some modules like dropout modules have different
+            behaviors between training and evaluation).
+"""
+
+
170
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings
+class TFCamembertEmbeddings(keras.layers.Layer):
+    """
+    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+    """
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+
+        self.padding_idx = 1
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.max_position_embeddings = config.max_position_embeddings
+        self.initializer_range = config.initializer_range
+        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+    def build(self, input_shape=None):
+        with tf.name_scope("word_embeddings"):
+            self.weight = self.add_weight(
+                name="weight",
+                shape=[self.config.vocab_size, self.hidden_size],
+                initializer=get_initializer(self.initializer_range),
+            )
+
+        with tf.name_scope("token_type_embeddings"):
+            self.token_type_embeddings = self.add_weight(
+                name="embeddings",
+                shape=[self.config.type_vocab_size, self.hidden_size],
+                initializer=get_initializer(self.initializer_range),
+            )
+
+        with tf.name_scope("position_embeddings"):
+            self.position_embeddings = self.add_weight(
+                name="embeddings",
+                shape=[self.max_position_embeddings, self.hidden_size],
+                initializer=get_initializer(self.initializer_range),
+            )
+
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "LayerNorm", None) is not None:
+            with tf.name_scope(self.LayerNorm.name):
+                self.LayerNorm.build([None, None, self.config.hidden_size])
+
+    def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
+        """
+        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+        symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+        Args:
+            input_ids: tf.Tensor
+        Returns: tf.Tensor
+        """
+        mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
+        incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
+
+        return incremental_indices + self.padding_idx
+
+    def call(
+        self,
+        input_ids=None,
+        position_ids=None,
+        token_type_ids=None,
+        inputs_embeds=None,
+        past_key_values_length=0,
+        training=False,
+    ):
+        """
+        Applies embedding based on inputs tensor.
+
+        Returns:
+            final_embeddings (`tf.Tensor`): output embedding tensor.
+        """
+        assert not (input_ids is None and inputs_embeds is None)
+
+        if input_ids is not None:
+            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+        input_shape = shape_list(inputs_embeds)[:-1]
+
+        if token_type_ids is None:
+            token_type_ids = tf.fill(dims=input_shape, value=0)
+
+        if position_ids is None:
+            if input_ids is not None:
+                # Create the position ids from the input token ids. Any padded tokens remain padded.
+                position_ids = self.create_position_ids_from_input_ids(
+                    input_ids=input_ids, past_key_values_length=past_key_values_length
+                )
+            else:
+                position_ids = tf.expand_dims(
+                    tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
+                )
+
+        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+        token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+        final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+        final_embeddings = self.LayerNorm(inputs=final_embeddings)
+        final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+        return final_embeddings
+
+
276
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Camembert
+class TFCamembertPooler(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        self.dense = keras.layers.Dense(
+            units=config.hidden_size,
+            kernel_initializer=get_initializer(config.initializer_range),
+            activation="tanh",
+            name="dense",
+        )
+        self.config = config
+
+    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+        # We "pool" the model by simply taking the hidden state corresponding
+        # to the first token.
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(inputs=first_token_tensor)
+
+        return pooled_output
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "dense", None) is not None:
+            with tf.name_scope(self.dense.name):
+                self.dense.build([None, None, self.config.hidden_size])
+
+
306
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Camembert
+class TFCamembertSelfAttention(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        if config.hidden_size % config.num_attention_heads != 0:
+            raise ValueError(
+                f"The hidden size ({config.hidden_size}) is not a multiple of the number "
+                f"of attention heads ({config.num_attention_heads})"
+            )
+
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+        self.query = keras.layers.Dense(
+            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+        )
+        self.key = keras.layers.Dense(
+            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+        )
+        self.value = keras.layers.Dense(
+            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+        )
+        self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
+
+        self.is_decoder = config.is_decoder
+        self.config = config
+
+    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+        # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+        return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+    def call(
+        self,
+        hidden_states: tf.Tensor,
+        attention_mask: tf.Tensor,
+        head_mask: tf.Tensor,
+        encoder_hidden_states: tf.Tensor,
+        encoder_attention_mask: tf.Tensor,
+        past_key_value: Tuple[tf.Tensor],
+        output_attentions: bool,
+        training: bool = False,
+    ) -> Tuple[tf.Tensor]:
+        batch_size = shape_list(hidden_states)[0]
+        mixed_query_layer = self.query(inputs=hidden_states)
+
+        # If this is instantiated as a cross-attention module, the keys
+        # and values come from an encoder; the attention mask needs to be
+        # such that the encoder's padding tokens are not attended to.
+        is_cross_attention = encoder_hidden_states is not None
+
+        if is_cross_attention and past_key_value is not None:
+            # reuse k, v, cross_attentions
+            key_layer = past_key_value[0]
+            value_layer = past_key_value[1]
+            attention_mask = encoder_attention_mask
+        elif is_cross_attention:
+            key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
+            value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
+            attention_mask = encoder_attention_mask
+        elif past_key_value is not None:
+            key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
+            value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
+            key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
+            value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
+        else:
+            key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
+            value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
+
+        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+
+        if self.is_decoder:
+            # if cross_attention, save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+            # Further calls to the cross_attention layer can then reuse all cross-attention
+            # key/value_states (first "if" case)
+            # if uni-directional self-attention (decoder), save Tuple(tf.Tensor, tf.Tensor) of
+            # all previous decoder key/value_states. Further calls to uni-directional self-attention
+            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+            # if encoder bi-directional self-attention, `past_key_value` is always `None`
+            past_key_value = (key_layer, value_layer)
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        # (batch_size, num_heads, seq_len_q, seq_len_k)
+        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+        dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+        attention_scores = tf.divide(attention_scores, dk)
+
+        if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the TFCamembertModel call() function)
+            attention_scores = tf.add(attention_scores, attention_mask)
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs = self.dropout(inputs=attention_probs, training=training)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs = tf.multiply(attention_probs, head_mask)
+
+        attention_output = tf.matmul(attention_probs, value_layer)
+        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+        # (batch_size, seq_len_q, all_head_size)
+        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
+        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+
+        if self.is_decoder:
+            outputs = outputs + (past_key_value,)
+        return outputs
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "query", None) is not None:
+            with tf.name_scope(self.query.name):
+                self.query.build([None, None, self.config.hidden_size])
+        if getattr(self, "key", None) is not None:
+            with tf.name_scope(self.key.name):
+                self.key.build([None, None, self.config.hidden_size])
+        if getattr(self, "value", None) is not None:
+            with tf.name_scope(self.value.name):
+                self.value.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Camembert
+class TFCamembertSelfOutput(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        self.dense = keras.layers.Dense(
+            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+        )
+        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+        self.config = config
+
+    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+        hidden_states = self.dense(inputs=hidden_states)
+        hidden_states = self.dropout(inputs=hidden_states, training=training)
+        hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+        return hidden_states
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "dense", None) is not None:
+            with tf.name_scope(self.dense.name):
+                self.dense.build([None, None, self.config.hidden_size])
+        if getattr(self, "LayerNorm", None) is not None:
+            with tf.name_scope(self.LayerNorm.name):
+                self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Camembert
+class TFCamembertAttention(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        self.self_attention = TFCamembertSelfAttention(config, name="self")
+        self.dense_output = TFCamembertSelfOutput(config, name="output")
+
+    def prune_heads(self, heads):
+        raise NotImplementedError
+
+    def call(
+        self,
+        input_tensor: tf.Tensor,
+        attention_mask: tf.Tensor,
+        head_mask: tf.Tensor,
+        encoder_hidden_states: tf.Tensor,
+        encoder_attention_mask: tf.Tensor,
+        past_key_value: Tuple[tf.Tensor],
+        output_attentions: bool,
+        training: bool = False,
+    ) -> Tuple[tf.Tensor]:
+        self_outputs = self.self_attention(
+            hidden_states=input_tensor,
+            attention_mask=attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            training=training,
+        )
+        attention_output = self.dense_output(
+            hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
+        )
+        # add attentions (possibly with past_key_value) if we output them
+        outputs = (attention_output,) + self_outputs[1:]
+
+        return outputs
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "self_attention", None) is not None:
+            with tf.name_scope(self.self_attention.name):
+                self.self_attention.build(None)
+        if getattr(self, "dense_output", None) is not None:
+            with tf.name_scope(self.dense_output.name):
+                self.dense_output.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Camembert
+class TFCamembertIntermediate(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        self.dense = keras.layers.Dense(
+            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+        )
+
+        if isinstance(config.hidden_act, str):
+            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+        else:
+            self.intermediate_act_fn = config.hidden_act
+        self.config = config
+
+    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+        hidden_states = self.dense(inputs=hidden_states)
+        hidden_states = self.intermediate_act_fn(hidden_states)
+
+        return hidden_states
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "dense", None) is not None:
+            with tf.name_scope(self.dense.name):
+                self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Camembert
+class TFCamembertOutput(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        self.dense = keras.layers.Dense(
+            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+        )
+        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+        self.config = config
+
+    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+        hidden_states = self.dense(inputs=hidden_states)
+        hidden_states = self.dropout(inputs=hidden_states, training=training)
+        hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+        return hidden_states
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "dense", None) is not None:
+            with tf.name_scope(self.dense.name):
+                self.dense.build([None, None, self.config.intermediate_size])
+        if getattr(self, "LayerNorm", None) is not None:
+            with tf.name_scope(self.LayerNorm.name):
+                self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Camembert
+class TFCamembertLayer(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+
+        self.attention = TFCamembertAttention(config, name="attention")
+        self.is_decoder = config.is_decoder
+        self.add_cross_attention = config.add_cross_attention
+        if self.add_cross_attention:
+            if not self.is_decoder:
+                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+            self.crossattention = TFCamembertAttention(config, name="crossattention")
+        self.intermediate = TFCamembertIntermediate(config, name="intermediate")
+        self.bert_output = TFCamembertOutput(config, name="output")
+
+    def call(
+        self,
+        hidden_states: tf.Tensor,
+        attention_mask: tf.Tensor,
+        head_mask: tf.Tensor,
+        encoder_hidden_states: tf.Tensor | None,
+        encoder_attention_mask: tf.Tensor | None,
+        past_key_value: Tuple[tf.Tensor] | None,
+        output_attentions: bool,
+        training: bool = False,
+    ) -> Tuple[tf.Tensor]:
+        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+        self_attention_outputs = self.attention(
+            input_tensor=hidden_states,
+            attention_mask=attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=None,
+            encoder_attention_mask=None,
+            past_key_value=self_attn_past_key_value,
+            output_attentions=output_attentions,
+            training=training,
+        )
+        attention_output = self_attention_outputs[0]
+
+        # if decoder, the last output is a tuple of self-attn cache
+        if self.is_decoder:
+            outputs = self_attention_outputs[1:-1]
+            present_key_value = self_attention_outputs[-1]
+        else:
+            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
+
+        cross_attn_present_key_value = None
+        if self.is_decoder and encoder_hidden_states is not None:
+            if not hasattr(self, "crossattention"):
+                raise ValueError(
+                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+                    " by setting `config.add_cross_attention=True`"
+                )
+
+            # cross_attn cached key/values tuple is at positions 3,4 of the past_key_value tuple
+            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+            cross_attention_outputs = self.crossattention(
+                input_tensor=attention_output,
+                attention_mask=attention_mask,
+                head_mask=head_mask,
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_attention_mask=encoder_attention_mask,
+                past_key_value=cross_attn_past_key_value,
+                output_attentions=output_attentions,
+                training=training,
+            )
+            attention_output = cross_attention_outputs[0]
+            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
+
+            # add cross-attn cache to positions 3,4 of the present_key_value tuple
+            cross_attn_present_key_value = cross_attention_outputs[-1]
+            present_key_value = present_key_value + cross_attn_present_key_value
+
+        intermediate_output = self.intermediate(hidden_states=attention_output)
+        layer_output = self.bert_output(
+            hidden_states=intermediate_output, input_tensor=attention_output, training=training
+        )
+        outputs = (layer_output,) + outputs  # add attentions if we output them
+
+        # if decoder, return the attn key/values as the last output
+        if self.is_decoder:
+            outputs = outputs + (present_key_value,)
+
+        return outputs
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "attention", None) is not None:
+            with tf.name_scope(self.attention.name):
+                self.attention.build(None)
+        if getattr(self, "intermediate", None) is not None:
+            with tf.name_scope(self.intermediate.name):
+                self.intermediate.build(None)
+        if getattr(self, "bert_output", None) is not None:
+            with tf.name_scope(self.bert_output.name):
+                self.bert_output.build(None)
+        if getattr(self, "crossattention", None) is not None:
+            with tf.name_scope(self.crossattention.name):
+                self.crossattention.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Camembert
+class TFCamembertEncoder(keras.layers.Layer):
+    def __init__(self, config: CamembertConfig, **kwargs):
+        super().__init__(**kwargs)
+        self.config = config
+        self.layer = [TFCamembertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+    def call(
+        self,
+        hidden_states: tf.Tensor,
+        attention_mask: tf.Tensor,
+        head_mask: tf.Tensor,
+        encoder_hidden_states: tf.Tensor | None,
+        encoder_attention_mask: tf.Tensor | None,
+        past_key_values: Tuple[Tuple[tf.Tensor]] | None,
+        use_cache: Optional[bool],
+        output_attentions: bool,
+        output_hidden_states: bool,
+        return_dict: bool,
+        training: bool = False,
+    ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
+        all_hidden_states = () if output_hidden_states else None
+        all_attentions = () if output_attentions else None
+        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+        next_decoder_cache = () if use_cache else None
+        for i, layer_module in enumerate(self.layer):
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            past_key_value = past_key_values[i] if past_key_values is not None else None
+
+            layer_outputs = layer_module(
+                hidden_states=hidden_states,
+                attention_mask=attention_mask,
+                head_mask=head_mask[i],
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_attention_mask=encoder_attention_mask,
+                past_key_value=past_key_value,
+                output_attentions=output_attentions,
+                training=training,
+            )
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache += (layer_outputs[-1],)
+
+            if output_attentions:
+                all_attentions = all_attentions + (layer_outputs[1],)
+                if self.config.add_cross_attention and encoder_hidden_states is not None:
+                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+        # Add last layer
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states,)
+
+        if not return_dict:
+            return tuple(
+                v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
+            )
+
+        return TFBaseModelOutputWithPastAndCrossAttentions(
+            last_hidden_state=hidden_states,
+            past_key_values=next_decoder_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_attentions,
+            cross_attentions=all_cross_attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "layer", None) is not None:
+            for layer in self.layer:
+                with tf.name_scope(layer.name):
+                    layer.build(None)
+
+
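+# Cache layout sketch for the encoder above: `past_key_values` holds one tuple per layer,
+# each containing (key, value) states of shape (batch, num_heads, past_seq_len, head_size)
+# for self-attention, plus two more entries per layer when cross-attention states are cached.
+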
+@keras_serializable
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaMainLayer with Roberta->Camembert
+class TFCamembertMainLayer(keras.layers.Layer):
+    config_class = CamembertConfig
+
+    def __init__(self, config, add_pooling_layer=True, **kwargs):
+        super().__init__(**kwargs)
+
+        self.config = config
+        self.is_decoder = config.is_decoder
+
+        self.num_hidden_layers = config.num_hidden_layers
+        self.initializer_range = config.initializer_range
+        self.output_attentions = config.output_attentions
+        self.output_hidden_states = config.output_hidden_states
+        self.return_dict = config.use_return_dict
+        self.encoder = TFCamembertEncoder(config, name="encoder")
+        self.pooler = TFCamembertPooler(config, name="pooler") if add_pooling_layer else None
+        # The embeddings must be the last declaration in order to follow the weights order
+        self.embeddings = TFCamembertEmbeddings(config, name="embeddings")
+
+    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
+    def get_input_embeddings(self) -> keras.layers.Layer:
+        return self.embeddings
+
+    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
+    def set_input_embeddings(self, value: tf.Variable):
+        self.embeddings.weight = value
+        self.embeddings.vocab_size = shape_list(value)[0]
+
+    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
+    def _prune_heads(self, heads_to_prune):
+        """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
+        the base class PreTrainedModel.
+        """
+        raise NotImplementedError
+
+    @unpack_inputs
+    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+        encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: bool = False,
+    ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
+        if not self.config.is_decoder:
+            use_cache = False
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            input_shape = shape_list(input_ids)
+        elif inputs_embeds is not None:
+            input_shape = shape_list(inputs_embeds)[:-1]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        batch_size, seq_length = input_shape
+
+        if past_key_values is None:
+            past_key_values_length = 0
+            past_key_values = [None] * len(self.encoder.layer)
+        else:
+            past_key_values_length = shape_list(past_key_values[0][0])[-2]
+
+        if attention_mask is None:
+            attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
+
+        if token_type_ids is None:
+            token_type_ids = tf.fill(dims=input_shape, value=0)
+
+        embedding_output = self.embeddings(
+            input_ids=input_ids,
+            position_ids=position_ids,
+            token_type_ids=token_type_ids,
+            inputs_embeds=inputs_embeds,
+            past_key_values_length=past_key_values_length,
+            training=training,
+        )
+
+        # We create a 3D attention mask from a 2D tensor mask.
+        # Sizes are [batch_size, 1, 1, to_seq_length]
+        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+        # This attention mask is simpler than the triangular masking of causal attention
+        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+        attention_mask_shape = shape_list(attention_mask)
+
+        mask_seq_length = seq_length + past_key_values_length
+        # Copied from `modeling_tf_t5.py`
+        # Provided a padding mask of dimensions [batch_size, mask_seq_length]
+        # - if the model is a decoder, apply a causal mask in addition to the padding mask
+        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
+        if self.is_decoder:
+            seq_ids = tf.range(mask_seq_length)
+            causal_mask = tf.less_equal(
+                tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
+                seq_ids[None, :, None],
+            )
+            causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
+            extended_attention_mask = causal_mask * attention_mask[:, None, :]
+            attention_mask_shape = shape_list(extended_attention_mask)
+            extended_attention_mask = tf.reshape(
+                extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
+            )
+            if past_key_values[0] is not None:
+                # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
+                extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
+        else:
+            extended_attention_mask = tf.reshape(
+                attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
+            )
+
+        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+        # masked positions, this operation will create a tensor which is 0.0 for
+        # positions we want to attend and -10000.0 for masked positions.
+        # Since we are adding it to the raw scores before the softmax, this is
+        # effectively the same as removing these entirely.
+        extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+        one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+        ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+        extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
+
+        # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
+        if self.is_decoder and encoder_attention_mask is not None:
+            # If a 2D or 3D attention mask is provided for the cross-attention,
+            # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
+            encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
+            num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
+            if num_dims_encoder_attention_mask == 3:
+                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
+            if num_dims_encoder_attention_mask == 2:
+                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
+
+            # T5 has a mask that can compare sequence ids, we could simulate this here with this transposition
+            # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
+            # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
+            #                                                 tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
+
+            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
+        else:
+            encoder_extended_attention_mask = None
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        if head_mask is not None:
+            raise NotImplementedError
+        else:
+            head_mask = [None] * self.config.num_hidden_layers
+
+        encoder_outputs = self.encoder(
+            hidden_states=embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_extended_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+
+        sequence_output = encoder_outputs[0]
+        pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
+
+        if not return_dict:
+            return (
+                sequence_output,
+                pooled_output,
+            ) + encoder_outputs[1:]
+
+        return TFBaseModelOutputWithPoolingAndCrossAttentions(
+            last_hidden_state=sequence_output,
+            pooler_output=pooled_output,
+            past_key_values=encoder_outputs.past_key_values,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+            cross_attentions=encoder_outputs.cross_attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "encoder", None) is not None:
+            with tf.name_scope(self.encoder.name):
+                self.encoder.build(None)
+        if getattr(self, "pooler", None) is not None:
+            with tf.name_scope(self.pooler.name):
+                self.pooler.build(None)
+        if getattr(self, "embeddings", None) is not None:
+            with tf.name_scope(self.embeddings.name):
+                self.embeddings.build(None)
+
+
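+# Worked example of the additive mask computed in TFCamembertMainLayer.call (hypothetical
+# values): a padding mask [1, 1, 0] becomes (1.0 - [1, 1, 0]) * -10000.0 = [0, 0, -10000],
+# so padded positions get -10000 added to their raw attention scores and effectively
+# vanish after the softmax.
+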
+class TFCamembertPreTrainedModel(TFPreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = CamembertConfig
+    base_model_prefix = "roberta"
+
+
+@add_start_docstrings(
+    "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaModel with Roberta->Camembert, ROBERTA->CAMEMBERT
+class TFCamembertModel(TFCamembertPreTrainedModel):
+    def __init__(self, config, *inputs, **kwargs):
+        super().__init__(config, *inputs, **kwargs)
+        self.roberta = TFCamembertMainLayer(config, name="roberta")
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+        encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: Optional[bool] = False,
+    ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]:
+        r"""
+        encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
+            decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
+            of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+            (see `past_key_values`). Set to `False` during training, `True` during generation.
+        """
+        outputs = self.roberta(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+
+        return outputs
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "roberta", None) is not None:
+            with tf.name_scope(self.roberta.name):
+                self.roberta.build(None)
+
+
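+# A minimal feature-extraction sketch for TFCamembertModel (illustrative: the checkpoint
+# name and tokenizer call are assumptions, not pinned by this file):
+#
+#     >>> from transformers import AutoTokenizer, TFCamembertModel
+#     >>> tokenizer = AutoTokenizer.from_pretrained("camembert-base")
+#     >>> model = TFCamembertModel.from_pretrained("camembert-base")
+#     >>> inputs = tokenizer("J'aime le camembert !", return_tensors="tf")
+#     >>> outputs = model(**inputs)
+#     >>> outputs.last_hidden_state.shape  # (batch, seq_len, hidden_size)
+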
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->Camembert
+class TFCamembertLMHead(keras.layers.Layer):
+    """Camembert Head for masked language modeling."""
+
+    def __init__(self, config, input_embeddings, **kwargs):
+        super().__init__(**kwargs)
+
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.dense = keras.layers.Dense(
+            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+        )
+        self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+        self.act = get_tf_activation("gelu")
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = input_embeddings
+
+    def build(self, input_shape=None):
+        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "dense", None) is not None:
+            with tf.name_scope(self.dense.name):
+                self.dense.build([None, None, self.config.hidden_size])
+        if getattr(self, "layer_norm", None) is not None:
+            with tf.name_scope(self.layer_norm.name):
+                self.layer_norm.build([None, None, self.config.hidden_size])
+
+    def get_output_embeddings(self):
+        return self.decoder
+
+    def set_output_embeddings(self, value):
+        self.decoder.weight = value
+        self.decoder.vocab_size = shape_list(value)[0]
+
+    def get_bias(self):
+        return {"bias": self.bias}
+
+    def set_bias(self, value):
+        self.bias = value["bias"]
+        self.config.vocab_size = shape_list(value["bias"])[0]
+
+    def call(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.act(hidden_states)
+        hidden_states = self.layer_norm(hidden_states)
+
+        # project back to the size of the vocabulary with a bias
+        seq_length = shape_list(tensor=hidden_states)[1]
+        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+        hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
+        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+        return hidden_states
+
+
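+# Note on weight tying in TFCamembertLMHead: `self.decoder` is the input embedding layer,
+# so the vocabulary projection reuses the embedding matrix via `transpose_b=True`; only the
+# per-token output bias is a new trainable weight, added in `build`.
+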
+@add_start_docstrings(
+    """CamemBERT Model with a `language modeling` head on top.""",
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT
+class TFCamembertForMaskedLM(TFCamembertPreTrainedModel, TFMaskedLanguageModelingLoss):
+    # Names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+    _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
+
+    def __init__(self, config, *inputs, **kwargs):
+        super().__init__(config, *inputs, **kwargs)
+
+        self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta")
+        self.lm_head = TFCamembertLMHead(config, self.roberta.embeddings, name="lm_head")
+
+    def get_lm_head(self):
+        return self.lm_head
+
+    def get_prefix_bias_name(self):
+        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+        return self.name + "/" + self.lm_head.name
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=TFMaskedLMOutput,
+        config_class=_CONFIG_FOR_DOC,
+        mask="<mask>",
+        expected_output="' Paris'",
+        expected_loss=0.1,
+    )
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: np.ndarray | tf.Tensor | None = None,
+        training: Optional[bool] = False,
+    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+        r"""
+        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        """
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+
+        sequence_output = outputs[0]
+        prediction_scores = self.lm_head(sequence_output)
+
+        loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TFMaskedLMOutput(
+            loss=loss,
+            logits=prediction_scores,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "roberta", None) is not None:
+            with tf.name_scope(self.roberta.name):
+                self.roberta.build(None)
+        if getattr(self, "lm_head", None) is not None:
+            with tf.name_scope(self.lm_head.name):
+                self.lm_head.build(None)
+
+
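+# Fill-mask sketch for TFCamembertForMaskedLM (illustrative; tokenizer and model loading
+# as in the base-model sketch above, and greedy top-1 decoding without post-processing):
+#
+#     >>> inputs = tokenizer("Le camembert est <mask> !", return_tensors="tf")
+#     >>> logits = model(**inputs).logits
+#     >>> mask_pos = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0]
+#     >>> tokenizer.decode(int(tf.argmax(logits[0, mask_pos])))
+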
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaClassificationHead
+class TFCamembertClassificationHead(keras.layers.Layer):
+    """Head for sentence-level classification tasks."""
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+        self.dense = keras.layers.Dense(
+            config.hidden_size,
+            kernel_initializer=get_initializer(config.initializer_range),
+            activation="tanh",
+            name="dense",
+        )
+        classifier_dropout = (
+            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+        )
+        self.dropout = keras.layers.Dropout(classifier_dropout)
+        self.out_proj = keras.layers.Dense(
+            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
+        )
+        self.config = config
+
+    def call(self, features, training=False):
+        x = features[:, 0, :]  # take the <s> token (equivalent to [CLS])
+        x = self.dropout(x, training=training)
+        x = self.dense(x)
+        x = self.dropout(x, training=training)
+        x = self.out_proj(x)
+        return x
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "dense", None) is not None:
+            with tf.name_scope(self.dense.name):
+                self.dense.build([None, None, self.config.hidden_size])
+        if getattr(self, "out_proj", None) is not None:
+            with tf.name_scope(self.out_proj.name):
+                self.out_proj.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+    """
+    CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+    pooled output) e.g. for GLUE tasks.
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification with Roberta->Camembert, ROBERTA->CAMEMBERT
+class TFCamembertForSequenceClassification(TFCamembertPreTrainedModel, TFSequenceClassificationLoss):
+    # Names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+    _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
+
+    def __init__(self, config, *inputs, **kwargs):
+        super().__init__(config, *inputs, **kwargs)
+        self.num_labels = config.num_labels
+
+        self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta")
+        self.classifier = TFCamembertClassificationHead(config, name="classifier")
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint="cardiffnlp/twitter-roberta-base-emotion",
+        output_type=TFSequenceClassifierOutput,
+        config_class=_CONFIG_FOR_DOC,
+        expected_output="'optimism'",
+        expected_loss=0.08,
+    )
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: np.ndarray | tf.Tensor | None = None,
+        training: Optional[bool] = False,
+    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+        r"""
+        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+        sequence_output = outputs[0]
+        logits = self.classifier(sequence_output, training=training)
+
+        loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TFSequenceClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "roberta", None) is not None:
+            with tf.name_scope(self.roberta.name):
+                self.roberta.build(None)
+        if getattr(self, "classifier", None) is not None:
+            with tf.name_scope(self.classifier.name):
+                self.classifier.build(None)
+
+
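+# Note: the classification model above is built with `add_pooling_layer=False`; pooling
+# happens inside TFCamembertClassificationHead itself (dropout -> tanh dense on the <s>
+# token -> dropout -> out_proj), so its logits have shape (batch_size, config.num_labels).
+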
+@add_start_docstrings(
+    """
+    CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+    for Named-Entity-Recognition (NER) tasks.
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification with Roberta->Camembert, ROBERTA->CAMEMBERT
+class TFCamembertForTokenClassification(TFCamembertPreTrainedModel, TFTokenClassificationLoss):
+    # Names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+    _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
+    _keys_to_ignore_on_load_missing = [r"dropout"]
+
+    def __init__(self, config, *inputs, **kwargs):
+        super().__init__(config, *inputs, **kwargs)
+        self.num_labels = config.num_labels
+
+        self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta")
+        classifier_dropout = (
+            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+        )
+        self.dropout = keras.layers.Dropout(classifier_dropout)
+        self.classifier = keras.layers.Dense(
+            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+        )
+        self.config = config
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint="ydshieh/roberta-large-ner-english",
+        output_type=TFTokenClassifierOutput,
+        config_class=_CONFIG_FOR_DOC,
+        expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
+        expected_loss=0.01,
+    )
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: np.ndarray | tf.Tensor | None = None,
+        training: Optional[bool] = False,
+    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+        r"""
+        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+        """
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+        sequence_output = outputs[0]
+
+        sequence_output = self.dropout(sequence_output, training=training)
+        logits = self.classifier(sequence_output)
+
+        loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TFTokenClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "roberta", None) is not None:
+            with tf.name_scope(self.roberta.name):
+                self.roberta.build(None)
+        if getattr(self, "classifier", None) is not None:
+            with tf.name_scope(self.classifier.name):
+                self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+    """
+    CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+    a softmax) e.g. for RocStories/SWAG tasks.
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMultipleChoice with Roberta->Camembert, ROBERTA->CAMEMBERT
+class TFCamembertForMultipleChoice(TFCamembertPreTrainedModel, TFMultipleChoiceLoss):
+    # Names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+    _keys_to_ignore_on_load_unexpected = [r"lm_head"]
+    _keys_to_ignore_on_load_missing = [r"dropout"]
+
+    def __init__(self, config, *inputs, **kwargs):
+        super().__init__(config, *inputs, **kwargs)
+
+        self.roberta = TFCamembertMainLayer(config, name="roberta")
+        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+        self.classifier = keras.layers.Dense(
+            1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+        )
+        self.config = config
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(
+        CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+    )
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=TFMultipleChoiceModelOutput,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: np.ndarray | tf.Tensor | None = None,
+        training: Optional[bool] = False,
+    ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+        r"""
+        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above.)
+        """
+
+        if input_ids is not None:
+            num_choices = shape_list(input_ids)[1]
+            seq_length = shape_list(input_ids)[2]
+        else:
+            num_choices = shape_list(inputs_embeds)[1]
+            seq_length = shape_list(inputs_embeds)[2]
+
+        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+        outputs = self.roberta(
+            flat_input_ids,
+            flat_attention_mask,
+            flat_token_type_ids,
+            flat_position_ids,
+            head_mask,
+            inputs_embeds,
+            output_attentions,
+            output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+        pooled_output = outputs[1]
+        pooled_output = self.dropout(pooled_output, training=training)
+        logits = self.classifier(pooled_output)
+        reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+        if not return_dict:
+            output = (reshaped_logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TFMultipleChoiceModelOutput(
+            loss=loss,
+            logits=reshaped_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "roberta", None) is not None:
+            with tf.name_scope(self.roberta.name):
+                self.roberta.build(None)
+        if getattr(self, "classifier", None) is not None:
+            with tf.name_scope(self.classifier.name):
+                self.classifier.build([None, None, self.config.hidden_size])
+
+
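+# Shape sketch for the multiple-choice head above (hypothetical sizes): inputs of shape
+# (batch=2, num_choices=4, seq_len=32) are flattened to (8, 32) before the encoder, the
+# per-choice logits (8, 1) are reshaped back to (2, 4), and the loss is a softmax over choices.
+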
+@add_start_docstrings(
+    """
+    CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+    """,
+    CAMEMBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering with Roberta->Camembert, ROBERTA->CAMEMBERT
+class TFCamembertForQuestionAnswering(TFCamembertPreTrainedModel, TFQuestionAnsweringLoss):
+    # Names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+    _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
+
+    def __init__(self, config, *inputs, **kwargs):
+        super().__init__(config, *inputs, **kwargs)
+        self.num_labels = config.num_labels
+
+        self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta")
+        self.qa_outputs = keras.layers.Dense(
+            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+        )
+        self.config = config
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint="ydshieh/roberta-base-squad2",
+        output_type=TFQuestionAnsweringModelOutput,
+        config_class=_CONFIG_FOR_DOC,
+        expected_output="' puppet'",
+        expected_loss=0.86,
+    )
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        token_type_ids: np.ndarray | tf.Tensor | None = None,
+        position_ids: np.ndarray | tf.Tensor | None = None,
+        head_mask: np.ndarray | tf.Tensor | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        start_positions: np.ndarray | tf.Tensor | None = None,
+        end_positions: np.ndarray | tf.Tensor | None = None,
+        training: Optional[bool] = False,
+    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+        r"""
+        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for the position (index) of the start of the labelled span for computing the token classification
+            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for the position (index) of the end of the labelled span for computing the token classification
+            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+        """
+        outputs = self.roberta(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            training=training,
+        )
+        sequence_output = outputs[0]
+
+        logits = self.qa_outputs(sequence_output)
+        start_logits, end_logits = tf.split(logits, 2, axis=-1)
+        start_logits = tf.squeeze(start_logits, axis=-1)
+        end_logits = tf.squeeze(end_logits, axis=-1)
+
+        loss = None
+        if start_positions is not None and end_positions is not None:
+            labels = {"start_position": start_positions}
+            labels["end_position"] = end_positions
+            loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+        if not return_dict:
+            output = (start_logits, end_logits) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TFQuestionAnsweringModelOutput(
+            loss=loss,
+            start_logits=start_logits,
+            end_logits=end_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "roberta", None) is not None:
+            with tf.name_scope(self.roberta.name):
+                self.roberta.build(None)
+        if getattr(self, "qa_outputs", None) is not None:
+            with tf.name_scope(self.qa_outputs.name):
+                self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
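+# Span decoding sketch for the QA head above (illustrative; a real pipeline also filters
+# out invalid spans and enforces a maximum answer length):
+#
+#     >>> outputs = model(**inputs)
+#     >>> start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
+#     >>> end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
+#     >>> answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
+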
+ @add_start_docstrings(
1660
+ """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", CAMEMBERT_START_DOCSTRING
1661
+ )
1662
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForCausalLM with Roberta->Camembert, ROBERTA->CAMEMBERT
1663
+ class TFCamembertForCausalLM(TFCamembertPreTrainedModel, TFCausalLanguageModelingLoss):
1664
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1665
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1666
+
1667
+ def __init__(self, config: CamembertConfig, *inputs, **kwargs):
1668
+ super().__init__(config, *inputs, **kwargs)
1669
+
1670
+ if not config.is_decoder:
1671
+ logger.warning("If you want to use `TFCamembertForCausalLM` as a standalone, add `is_decoder=True`.")
1672
+
1673
+ self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta")
1674
+ self.lm_head = TFCamembertLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head")
1675
+
1676
+ def get_lm_head(self):
1677
+ return self.lm_head
1678
+
1679
+ def get_prefix_bias_name(self):
1680
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1681
+ return self.name + "/" + self.lm_head.name
1682
+
1683
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
1684
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1685
+ input_shape = input_ids.shape
1686
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1687
+ if attention_mask is None:
1688
+ attention_mask = tf.ones(input_shape)
1689
+
1690
+ # cut decoder_input_ids if past is used
1691
+ if past_key_values is not None:
1692
+ input_ids = input_ids[:, -1:]
1693
+
1694
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1695
+
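
A small illustration of what `prepare_inputs_for_generation` above does during cached decoding (dummy values only; the real cache is a tuple of key/value tensors per layer):

```python
import tensorflow as tf

input_ids = tf.constant([[5, 17, 42, 99]])  # 4 tokens generated so far

# First step: no cache yet, so the full prefix is fed through the model.
step_one = {"input_ids": input_ids, "past_key_values": None}

# Later steps: the cache already holds keys/values for the prefix, so only
# the newest token needs to be embedded and attended from.
step_later = {"input_ids": input_ids[:, -1:], "past_key_values": "<cache>"}

print(step_one["input_ids"].shape)    # (1, 4)
print(step_later["input_ids"].shape)  # (1, 1)
```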
1696
+ @unpack_inputs
1697
+ @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1698
+ @add_code_sample_docstrings(
1699
+ checkpoint=_CHECKPOINT_FOR_DOC,
1700
+ output_type=TFCausalLMOutputWithCrossAttentions,
1701
+ config_class=_CONFIG_FOR_DOC,
1702
+ )
1703
+ def call(
1704
+ self,
1705
+ input_ids: TFModelInputType | None = None,
1706
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1707
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1708
+ position_ids: np.ndarray | tf.Tensor | None = None,
1709
+ head_mask: np.ndarray | tf.Tensor | None = None,
1710
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1711
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1712
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1713
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1714
+ use_cache: Optional[bool] = None,
1715
+ output_attentions: Optional[bool] = None,
1716
+ output_hidden_states: Optional[bool] = None,
1717
+ return_dict: Optional[bool] = None,
1718
+ labels: np.ndarray | tf.Tensor | None = None,
1719
+ training: Optional[bool] = False,
1720
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1721
+ r"""
1722
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1723
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1724
+ the model is configured as a decoder.
1725
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1726
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1727
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1728
+
1729
+ - 1 for tokens that are **not masked**,
1730
+ - 0 for tokens that are **masked**.
1731
+
1732
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1733
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1734
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1735
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1736
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1737
+ use_cache (`bool`, *optional*, defaults to `True`):
1738
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1739
+ `past_key_values`). Set to `False` during training, `True` during generation.
1740
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1741
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1742
+ config.vocab_size - 1]`.
1743
+ """
1744
+ outputs = self.roberta(
1745
+ input_ids=input_ids,
1746
+ attention_mask=attention_mask,
1747
+ token_type_ids=token_type_ids,
1748
+ position_ids=position_ids,
1749
+ head_mask=head_mask,
1750
+ inputs_embeds=inputs_embeds,
1751
+ encoder_hidden_states=encoder_hidden_states,
1752
+ encoder_attention_mask=encoder_attention_mask,
1753
+ past_key_values=past_key_values,
1754
+ use_cache=use_cache,
1755
+ output_attentions=output_attentions,
1756
+ output_hidden_states=output_hidden_states,
1757
+ return_dict=return_dict,
1758
+ training=training,
1759
+ )
1760
+
1761
+ sequence_output = outputs[0]
1762
+ logits = self.lm_head(hidden_states=sequence_output, training=training)
1763
+ loss = None
1764
+
1765
+ if labels is not None:
1766
+ # shift labels to the left and cut last logit token
1767
+ shifted_logits = logits[:, :-1]
1768
+ labels = labels[:, 1:]
1769
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1770
+
1771
+ if not return_dict:
1772
+ output = (logits,) + outputs[2:]
1773
+ return ((loss,) + output) if loss is not None else output
1774
+
1775
+ return TFCausalLMOutputWithCrossAttentions(
1776
+ loss=loss,
1777
+ logits=logits,
1778
+ past_key_values=outputs.past_key_values,
1779
+ hidden_states=outputs.hidden_states,
1780
+ attentions=outputs.attentions,
1781
+ cross_attentions=outputs.cross_attentions,
1782
+ )
1783
+
1784
+ def build(self, input_shape=None):
1785
+ if self.built:
1786
+ return
1787
+ self.built = True
1788
+ if getattr(self, "roberta", None) is not None:
1789
+ with tf.name_scope(self.roberta.name):
1790
+ self.roberta.build(None)
1791
+ if getattr(self, "lm_head", None) is not None:
1792
+ with tf.name_scope(self.lm_head.name):
1793
+ self.lm_head.build(None)
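
For reference, a toy sketch of the label shift used in the causal-LM loss above: position t is trained to predict token t+1, so the logits drop their last position and the labels drop their first (all values below are made up):

```python
import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])
logits = tf.random.normal((1, 4, 32))  # (batch, seq_len, toy vocab of 32)

shifted_logits = logits[:, :-1]  # predictions at positions 0..2
shifted_labels = labels[:, 1:]   # targets are the *next* tokens: 12, 13, 14

print(shifted_logits.shape, shifted_labels.shape)  # (1, 3, 32) (1, 3)
```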
venv/lib/python3.10/site-packages/transformers/models/camembert/tokenization_camembert.py ADDED
@@ -0,0 +1,319 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for Camembert model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ SPIECE_UNDERLINE = "▁"
34
+
35
+
36
+ class CamembertTokenizer(PreTrainedTokenizer):
37
+ """
38
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Construct a CamemBERT tokenizer. Based on
39
+ [SentencePiece](https://github.com/google/sentencepiece).
40
+
41
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
42
+ this superclass for more information regarding those methods.
43
+
44
+ Args:
45
+ vocab_file (`str`):
46
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
47
+ contains the vocabulary necessary to instantiate a tokenizer.
48
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
49
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
50
+
51
+ <Tip>
52
+
53
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
54
+ sequence. The token used is the `cls_token`.
55
+
56
+ </Tip>
57
+
58
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
59
+ The end of sequence token.
60
+
61
+ <Tip>
62
+
63
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
64
+ The token used is the `sep_token`.
65
+
66
+ </Tip>
67
+
68
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
69
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
70
+ sequence classification or for a text and a question for question answering. It is also used as the last
71
+ token of a sequence built with special tokens.
72
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
73
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
74
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
75
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
76
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
77
+ token instead.
78
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
79
+ The token used for padding, for example when batching sequences of different lengths.
80
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
81
+ The token used for masking values. This is the token used when training this model with masked language
82
+ modeling. This is the token which the model will try to predict.
83
+ additional_special_tokens (`List[str]`, *optional*, defaults to `['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED']`):
84
+ Additional special tokens used by the tokenizer.
85
+ sp_model_kwargs (`dict`, *optional*):
86
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
87
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
88
+ to set:
89
+
90
+ - `enable_sampling`: Enable subword regularization.
91
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
92
+
93
+ - `nbest_size = {0,1}`: No sampling is performed.
94
+ - `nbest_size > 1`: samples from the nbest_size results.
95
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
96
+ using forward-filtering-and-backward-sampling algorithm.
97
+
98
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
99
+ BPE-dropout.
100
+
101
+ Attributes:
102
+ sp_model (`SentencePieceProcessor`):
103
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
104
+ """
105
+
106
+ vocab_files_names = VOCAB_FILES_NAMES
107
+ model_input_names = ["input_ids", "attention_mask"]
108
+
109
+ def __init__(
110
+ self,
111
+ vocab_file,
112
+ bos_token="<s>",
113
+ eos_token="</s>",
114
+ sep_token="</s>",
115
+ cls_token="<s>",
116
+ unk_token="<unk>",
117
+ pad_token="<pad>",
118
+ mask_token="<mask>",
119
+ additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED", "<unk>NOTUSED"],
120
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
121
+ **kwargs,
122
+ ) -> None:
123
+ # Mask token behaves like a normal word, i.e. includes the space before it
124
+ mask_token = (
125
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False, special=True)
126
+ if isinstance(mask_token, str)
127
+ else mask_token
128
+ )
129
+
130
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
131
+
132
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
133
+ self.sp_model.Load(str(vocab_file))
134
+ self.vocab_file = vocab_file
135
+
136
+ # HACK: These tokens were added by the author for an obscure reason as they were already part of the
137
+ # sentencepiece vocabulary (this is the case for <s> and </s> and <unk>).
138
+ # In this case it is recommended to properly set the tokens by hand.
139
+ self._added_tokens_decoder = {
140
+ 0: AddedToken("<s>NOTUSED", special=True),
141
+ 1: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token,
142
+ 2: AddedToken("</s>NOTUSED", special=True),
143
+ 3: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token,
144
+ 4: AddedToken("<unk>NOTUSED", special=True),
145
+ }
146
+
147
+ self.fairseq_offset = 4 # 3 tokens are newly added, but the offset starts from 4
148
+
149
+ # legacy: camembert is a particular case where we have to make sure `"<unk>NOTUSED"` is here
150
+ if "added_tokens_decoder" in kwargs:
151
+ # this is the only class that requires this, unfortunately
152
+ # the reason is that the fast version handles these legacy added tokens differently
153
+ kwargs["added_tokens_decoder"].update(self._added_tokens_decoder)
154
+
155
+ super().__init__(
156
+ bos_token=bos_token,
157
+ eos_token=eos_token,
158
+ unk_token=unk_token,
159
+ sep_token=sep_token,
160
+ cls_token=cls_token,
161
+ pad_token=pad_token,
162
+ mask_token=mask_token,
163
+ additional_special_tokens=additional_special_tokens,
164
+ sp_model_kwargs=self.sp_model_kwargs,
165
+ **kwargs,
166
+ )
167
+
168
+ @property
169
+ def vocab_size(self):
170
+ # The length of the vocabulary without added tokens is len(self.sp_model) but the added tokens are added at the beginning.
171
+ return len(self.sp_model)
172
+
173
+ def get_vocab(self):
174
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.fairseq_offset)}
175
+ vocab.update(self.added_tokens_encoder)
176
+ return vocab
177
+
178
+ def _tokenize(self, text: str) -> List[str]:
179
+ return self.sp_model.encode(text, out_type=str)
180
+
181
+ def _convert_token_to_id(self, token):
182
+ """Converts a token (str) in an id using the vocab."""
183
+ # specific to camembert, both 3 and 4 point to the unk token.
184
+ if self.sp_model.PieceToId(token) == 0:
185
+ # Convert sentence piece unk token to fairseq unk token index
186
+ return self.unk_token_id
187
+ return self.fairseq_offset + self.sp_model.PieceToId(token)
188
+
189
+ def _convert_id_to_token(self, index):
190
+ """Converts an index (integer) in a token (str) using the vocab."""
191
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
192
+
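
A standalone sketch of the id arithmetic in the two conversion methods above, with no SentencePiece model required (the constants mirror the reserved-token table set up in `__init__`):

```python
FAIRSEQ_OFFSET = 4  # ids 0-4 are reserved for the legacy fairseq specials
UNK_TOKEN_ID = 3    # "<unk>" in the added-tokens table

def piece_id_to_token_id(piece_id: int) -> int:
    # SentencePiece reports unknown pieces as id 0; redirect them to "<unk>".
    if piece_id == 0:
        return UNK_TOKEN_ID
    return FAIRSEQ_OFFSET + piece_id

def token_id_to_piece_id(token_id: int) -> int:
    return token_id - FAIRSEQ_OFFSET

print(piece_id_to_token_id(0))   # 3  -> "<unk>"
print(piece_id_to_token_id(10))  # 14 -> shifted past the reserved ids
print(token_id_to_piece_id(14))  # 10
```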
193
+ def convert_tokens_to_string(self, tokens):
194
+ """Converts a sequence of tokens (string) in a single string."""
195
+ # TODO decode outputs do not match between fast and slow
196
+ current_sub_tokens = []
197
+ out_string = ""
198
+ prev_is_special = False
199
+ for token in tokens:
200
+ # make sure that special tokens are not decoded using sentencepiece model
201
+ if token in self.all_special_tokens:
202
+ if not prev_is_special:
203
+ out_string += " "
204
+ out_string += self.sp_model.decode(current_sub_tokens) + token
205
+ prev_is_special = True
206
+ current_sub_tokens = []
207
+ else:
208
+ current_sub_tokens.append(token)
209
+ prev_is_special = False
210
+ out_string += self.sp_model.decode(current_sub_tokens)
211
+ return out_string.strip()
212
+
213
+ def __getstate__(self):
214
+ state = self.__dict__.copy()
215
+ state["sp_model"] = None
216
+ return state
217
+
218
+ def __setstate__(self, d):
219
+ self.__dict__ = d
220
+
221
+ # for backward compatibility
222
+ if not hasattr(self, "sp_model_kwargs"):
223
+ self.sp_model_kwargs = {}
224
+
225
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
226
+ self.sp_model.Load(self.vocab_file)
227
+
228
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
229
+ if not os.path.isdir(save_directory):
230
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
231
+ return
232
+ out_vocab_file = os.path.join(
233
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
234
+ )
235
+
236
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
237
+ copyfile(self.vocab_file, out_vocab_file)
238
+ elif not os.path.isfile(self.vocab_file):
239
+ with open(out_vocab_file, "wb") as fi:
240
+ content_spiece_model = self.sp_model.serialized_model_proto()
241
+ fi.write(content_spiece_model)
242
+
243
+ return (out_vocab_file,)
244
+
245
+ def build_inputs_with_special_tokens(
246
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
247
+ ) -> List[int]:
248
+ """
249
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
250
+ adding special tokens. A CamemBERT sequence has the following format:
251
+
252
+ - single sequence: `<s> X </s>`
253
+ - pair of sequences: `<s> A </s></s> B </s>`
254
+
255
+ Args:
256
+ token_ids_0 (`List[int]`):
257
+ List of IDs to which the special tokens will be added.
258
+ token_ids_1 (`List[int]`, *optional*):
259
+ Optional second list of IDs for sequence pairs.
260
+
261
+ Returns:
262
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
263
+ """
264
+
265
+ if token_ids_1 is None:
266
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
267
+ cls = [self.cls_token_id]
268
+ sep = [self.sep_token_id]
269
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
270
+
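
The list arithmetic above, traced with dummy ids (`cls=5`, `sep=6` stand in for the real vocabulary values):

```python
cls, sep = [5], [6]
token_ids_0, token_ids_1 = [10, 11], [20, 21, 22]

single = cls + token_ids_0 + sep                          # <s> X </s>
pair = cls + token_ids_0 + sep + sep + token_ids_1 + sep  # <s> A </s></s> B </s>

print(single)  # [5, 10, 11, 6]
print(pair)    # [5, 10, 11, 6, 6, 20, 21, 22, 6]
```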
271
+ def get_special_tokens_mask(
272
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
273
+ ) -> List[int]:
274
+ """
275
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
276
+ special tokens using the tokenizer `prepare_for_model` method.
277
+
278
+ Args:
279
+ token_ids_0 (`List[int]`):
280
+ List of IDs.
281
+ token_ids_1 (`List[int]`, *optional*):
282
+ Optional second list of IDs for sequence pairs.
283
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
284
+ Whether or not the token list is already formatted with special tokens for the model.
285
+
286
+ Returns:
287
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
288
+ """
289
+ if already_has_special_tokens:
290
+ return super().get_special_tokens_mask(
291
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
292
+ )
293
+
294
+ if token_ids_1 is None:
295
+ return [1] + ([0] * len(token_ids_0)) + [1]
296
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
297
+
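
For a pair of raw sequences of lengths 2 and 3, the mask built above lines up with the `<s> A </s></s> B </s>` layout:

```python
len0, len1 = 2, 3  # lengths of the two raw sequences

mask = [1] + [0] * len0 + [1, 1] + [0] * len1 + [1]
print(mask)  # [1, 0, 0, 1, 1, 0, 0, 0, 1] -- specials marked 1, sequence tokens 0
```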
298
+ def create_token_type_ids_from_sequences(
299
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
300
+ ) -> List[int]:
301
+ """
302
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
303
+ RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
304
+
305
+ Args:
306
+ token_ids_0 (`List[int]`):
307
+ List of IDs.
308
+ token_ids_1 (`List[int]`, *optional*):
309
+ Optional second list of IDs for sequence pairs.
310
+
311
+ Returns:
312
+ `List[int]`: List of zeros.
313
+ """
314
+ sep = [self.sep_token_id]
315
+ cls = [self.cls_token_id]
316
+
317
+ if token_ids_1 is None:
318
+ return len(cls + token_ids_0 + sep) * [0]
319
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
venv/lib/python3.10/site-packages/transformers/models/camembert/tokenization_camembert_fast.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Fast tokenization classes for Camembert model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_camembert import CamembertTokenizer
29
+ else:
30
+ CamembertTokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
36
+
37
+
38
+ SPIECE_UNDERLINE = "▁"
39
+
40
+
41
+ class CamembertTokenizerFast(PreTrainedTokenizerFast):
42
+ """
43
+ Construct a "fast" CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
44
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
45
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
46
+
47
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
48
+ refer to this superclass for more information regarding those methods.
49
+
50
+ Args:
51
+ vocab_file (`str`):
52
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
53
+ contains the vocabulary necessary to instantiate a tokenizer.
54
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
55
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
56
+
57
+ <Tip>
58
+
59
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
60
+ sequence. The token used is the `cls_token`.
61
+
62
+ </Tip>
63
+
64
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
65
+ The end of sequence token.
66
+
67
+ <Tip>
68
+
69
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
70
+ The token used is the `sep_token`.
71
+
72
+ </Tip>
73
+
74
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
75
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
76
+ sequence classification or for a text and a question for question answering. It is also used as the last
77
+ token of a sequence built with special tokens.
78
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
79
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
80
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
81
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
82
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
83
+ token instead.
84
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
85
+ The token used for padding, for example when batching sequences of different lengths.
86
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
87
+ The token used for masking values. This is the token used when training this model with masked language
88
+ modeling. This is the token which the model will try to predict.
89
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED", "<unk>NOTUSED"]`):
90
+ Additional special tokens used by the tokenizer.
91
+ """
92
+
93
+ vocab_files_names = VOCAB_FILES_NAMES
94
+ model_input_names = ["input_ids", "attention_mask"]
95
+ slow_tokenizer_class = CamembertTokenizer
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_file=None,
100
+ tokenizer_file=None,
101
+ bos_token="<s>",
102
+ eos_token="</s>",
103
+ sep_token="</s>",
104
+ cls_token="<s>",
105
+ unk_token="<unk>",
106
+ pad_token="<pad>",
107
+ mask_token="<mask>",
108
+ additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED", "<unk>NOTUSED"],
109
+ **kwargs,
110
+ ):
111
+ # Mask token behaves like a normal word, i.e. includes the space before it. Will have normalized = False
112
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
113
+ super().__init__(
114
+ vocab_file,
115
+ tokenizer_file=tokenizer_file,
116
+ bos_token=bos_token,
117
+ eos_token=eos_token,
118
+ sep_token=sep_token,
119
+ cls_token=cls_token,
120
+ unk_token=unk_token,
121
+ pad_token=pad_token,
122
+ mask_token=mask_token,
123
+ additional_special_tokens=additional_special_tokens,
124
+ **kwargs,
125
+ )
126
+
127
+ self.vocab_file = vocab_file
128
+
129
+ @property
130
+ def can_save_slow_tokenizer(self) -> bool:
131
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
132
+
133
+ def build_inputs_with_special_tokens(
134
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
135
+ ) -> List[int]:
136
+ """
137
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
138
+ adding special tokens. A CamemBERT sequence has the following format:
139
+
140
+ - single sequence: `<s> X </s>`
141
+ - pair of sequences: `<s> A </s></s> B </s>`
142
+
143
+ Args:
144
+ token_ids_0 (`List[int]`):
145
+ List of IDs to which the special tokens will be added.
146
+ token_ids_1 (`List[int]`, *optional*):
147
+ Optional second list of IDs for sequence pairs.
148
+
149
+ Returns:
150
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
151
+ """
152
+
153
+ if token_ids_1 is None:
154
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
155
+ cls = [self.cls_token_id]
156
+ sep = [self.sep_token_id]
157
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
158
+
159
+ def create_token_type_ids_from_sequences(
160
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
161
+ ) -> List[int]:
162
+ """
163
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
164
+ RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
165
+
166
+ Args:
167
+ token_ids_0 (`List[int]`):
168
+ List of IDs.
169
+ token_ids_1 (`List[int]`, *optional*):
170
+ Optional second list of IDs for sequence pairs.
171
+
172
+ Returns:
173
+ `List[int]`: List of zeros.
174
+ """
175
+ sep = [self.sep_token_id]
176
+ cls = [self.cls_token_id]
177
+
178
+ if token_ids_1 is None:
179
+ return len(cls + token_ids_0 + sep) * [0]
180
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
181
+
182
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
183
+ if not self.can_save_slow_tokenizer:
184
+ raise ValueError(
185
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
186
+ "tokenizer."
187
+ )
188
+
189
+ if not os.path.isdir(save_directory):
190
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
191
+ return
192
+ out_vocab_file = os.path.join(
193
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
194
+ )
195
+
196
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
197
+ copyfile(self.vocab_file, out_vocab_file)
198
+
199
+ return (out_vocab_file,)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/__init__.py ADDED
@@ -0,0 +1,64 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_kosmos2": ["KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Kosmos2Config"],
27
+ "processing_kosmos2": ["Kosmos2Processor"],
28
+ }
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_kosmos2"] = [
37
+ "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "Kosmos2ForConditionalGeneration",
39
+ "Kosmos2Model",
40
+ "Kosmos2PreTrainedModel",
41
+ ]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_kosmos2 import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, Kosmos2Config
46
+ from .processing_kosmos2 import Kosmos2Processor
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_kosmos2 import (
55
+ KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ Kosmos2ForConditionalGeneration,
57
+ Kosmos2Model,
58
+ Kosmos2PreTrainedModel,
59
+ )
60
+
61
+ else:
62
+ import sys
63
+
64
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.01 kB)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/configuration_kosmos2.cpython-310.pyc ADDED
Binary file (11.4 kB)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (2.32 kB)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/modeling_kosmos2.cpython-310.pyc ADDED
Binary file (64.4 kB)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/processing_kosmos2.cpython-310.pyc ADDED
Binary file (21 kB)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/configuration_kosmos2.py ADDED
@@ -0,0 +1,295 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ KOSMOS-2 model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class Kosmos2TextConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a
33
+ KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a
34
+ configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2
35
+ [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 65037):
42
+ Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the
43
+ `input_ids` passed when calling [`Kosmos2Model`].
44
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ embed_dim (`int`, *optional*, defaults to 2048):
48
+ Dimensionality of the layers and the pooler layer.
49
+ layers (`int`, *optional*, defaults to 24):
50
+ Number of hidden layers in the Transformer decoder.
51
+ ffn_dim (`int`, *optional*, defaults to 8192):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder.
53
+ attention_heads (`int`, *optional*, defaults to 32):
54
+ Number of attention heads for each attention layer in the Transformer decoder.
55
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention probabilities.
62
+ activation_dropout (`float`, *optional*, defaults to 0.0):
63
+ The dropout ratio for activations inside the fully connected layer.
64
+ layerdrop (`float`, *optional*, defaults to 0.0):
65
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
66
+ for more details.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
68
+ The epsilon used by the layer normalization layers.
69
+ init_std (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ scale_embedding (`bool`, *optional*, defaults to `True`):
72
+ Scale embeddings by dividing by sqrt(embed_dim).
73
+ use_cache (`bool`, *optional*, defaults to `True`):
74
+ Whether or not the model should return the last key/values attentions (not used by all models).
75
+ """
76
+
77
+ model_type = "kosmos_2_text_model"
78
+ keys_to_ignore_at_inference = ["past_key_values"]
79
+ attribute_map = {
80
+ "num_attention_heads": "attention_heads",
81
+ "hidden_size": "embed_dim",
82
+ "num_hidden_layers": "layers",
83
+ }
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_size=65037,
88
+ max_position_embeddings=2048,
89
+ embed_dim=2048,
90
+ layers=24,
91
+ ffn_dim=8192,
92
+ attention_heads=32,
93
+ activation_function="gelu",
94
+ dropout=0.1,
95
+ attention_dropout=0.1,
96
+ activation_dropout=0.0,
97
+ layerdrop=0.0,
98
+ layer_norm_eps=1e-5,
99
+ init_std=0.02,
100
+ scale_embedding=True,
101
+ use_cache=True,
102
+ pad_token_id=1,
103
+ bos_token_id=0,
104
+ eos_token_id=2,
105
+ **kwargs,
106
+ ):
107
+ super().__init__(
108
+ pad_token_id=pad_token_id,
109
+ bos_token_id=bos_token_id,
110
+ eos_token_id=eos_token_id,
111
+ **kwargs,
112
+ )
113
+
114
+ self.vocab_size = vocab_size
115
+ self.max_position_embeddings = max_position_embeddings
116
+ self.embed_dim = embed_dim
117
+ self.layers = layers
118
+ self.ffn_dim = ffn_dim
119
+ self.attention_heads = attention_heads
120
+ self.activation_function = activation_function
121
+ self.dropout = dropout
122
+ self.attention_dropout = attention_dropout
123
+ self.activation_dropout = activation_dropout
124
+ self.layerdrop = layerdrop
125
+ self.layer_norm_eps = layer_norm_eps
126
+ self.init_std = init_std
127
+ self.scale_embedding = scale_embedding
128
+ self.use_cache = use_cache
129
+
130
+ @classmethod
131
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
132
+ cls._set_token_in_kwargs(kwargs)
133
+
134
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
135
+
136
+ # get the text config dict if we are loading from Kosmos2Config
137
+ if config_dict.get("model_type") == "kosmos-2":
138
+ config_dict = config_dict["text_config"]
139
+
140
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
141
+ logger.warning(
142
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
143
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
144
+ )
145
+
146
+ return cls.from_dict(config_dict, **kwargs)
147
+
148
+
149
+ class Kosmos2VisionConfig(PretrainedConfig):
150
+ r"""
151
+ This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a
152
+ KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
153
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2
154
+ [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
155
+
156
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
157
+ documentation from [`PretrainedConfig`] for more information.
158
+
159
+ Args:
160
+ hidden_size (`int`, *optional*, defaults to 1024):
161
+ Dimensionality of the encoder layers and the pooler layer.
162
+ intermediate_size (`int`, *optional*, defaults to 4096):
163
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
164
+ num_hidden_layers (`int`, *optional*, defaults to 24):
165
+ Number of hidden layers in the Transformer encoder.
166
+ num_attention_heads (`int`, *optional*, defaults to 16):
167
+ Number of attention heads for each attention layer in the Transformer encoder.
168
+ num_channels (`int`, *optional*, defaults to 3):
169
+ The number of input channels.
170
+ image_size (`int`, *optional*, defaults to 224):
171
+ The size (resolution) of each image.
172
+ patch_size (`int`, *optional*, defaults to 14):
173
+ The size (resolution) of each patch.
174
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
175
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
176
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
177
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
178
+ The epsilon used by the layer normalization layers.
179
+ attention_dropout (`float`, *optional*, defaults to 0.0):
180
+ The dropout ratio for the attention probabilities.
181
+ initializer_range (`float`, *optional*, defaults to 0.02):
182
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
183
+ initializer_factor (`float`, *optional*, defaults to 1):
184
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
185
+ testing).
186
+ """
187
+
188
+ model_type = "kosmos_2_vision_model"
189
+
190
+ def __init__(
191
+ self,
192
+ hidden_size=1024,
193
+ intermediate_size=4096,
194
+ num_hidden_layers=24,
195
+ num_attention_heads=16,
196
+ num_channels=3,
197
+ image_size=224,
198
+ patch_size=14,
199
+ hidden_act="quick_gelu",
200
+ layer_norm_eps=1e-5,
201
+ attention_dropout=0.0,
202
+ initializer_range=0.02,
203
+ initializer_factor=1.0,
204
+ **kwargs,
205
+ ):
206
+ super().__init__(**kwargs)
207
+
208
+ self.hidden_size = hidden_size
209
+ self.intermediate_size = intermediate_size
210
+ self.num_hidden_layers = num_hidden_layers
211
+ self.num_attention_heads = num_attention_heads
212
+ self.num_channels = num_channels
213
+ self.patch_size = patch_size
214
+ self.image_size = image_size
215
+ self.initializer_range = initializer_range
216
+ self.initializer_factor = initializer_factor
217
+ self.attention_dropout = attention_dropout
218
+ self.layer_norm_eps = layer_norm_eps
219
+ self.hidden_act = hidden_act
220
+
221
+ @classmethod
222
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
223
+ cls._set_token_in_kwargs(kwargs)
224
+
225
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
226
+
227
+ # get the vision config dict if we are loading from Kosmos2Config
228
+ if config_dict.get("model_type") == "kosmos-2":
229
+ config_dict = config_dict["vision_config"]
230
+
231
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
232
+ logger.warning(
233
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
234
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
235
+ )
236
+
237
+ return cls.from_dict(config_dict, **kwargs)
238
+
239
+
240
+ class Kosmos2Config(PretrainedConfig):
241
+ r"""
242
+ This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a
243
+ KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
244
+ with the defaults will yield a similar configuration to that of the KOSMOS-2
245
+ [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
246
+
247
+ Args:
248
+ text_config (`dict`, *optional*):
249
+ Dictionary of configuration options used to initialize [`Kosmos2TextConfig`].
250
+ vision_config (`dict`, *optional*):
251
+ Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`].
252
+ latent_query_num (`int`, *optional*, defaults to 64):
253
+ The number of latent query tokens that represent the image features used in the text decoder component.
254
+ kwargs (*optional*):
255
+ Dictionary of keyword arguments.
256
+
257
+ Example:
258
+
259
+ ```python
260
+ >>> from transformers import Kosmos2Config, Kosmos2Model
261
+
262
+ >>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration
263
+ >>> configuration = Kosmos2Config()
264
+
265
+ >>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration
266
+ >>> model = Kosmos2Model(configuration)
267
+
268
+ >>> # Accessing the model configuration
269
+ >>> configuration = model.config
270
+ ```"""
271
+
272
+ model_type = "kosmos-2"
273
+ is_composition = True
274
+
275
+ def __init__(
276
+ self,
277
+ text_config=None,
278
+ vision_config=None,
279
+ latent_query_num=64,
280
+ **kwargs,
281
+ ):
282
+ super().__init__(**kwargs)
283
+
284
+ if text_config is None:
285
+ text_config = {}
286
+ logger.info("`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.")
287
+
288
+ if vision_config is None:
289
+ vision_config = {}
290
+ logger.info("`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.")
291
+
292
+ self.text_config = Kosmos2TextConfig(**text_config)
293
+ self.vision_config = Kosmos2VisionConfig(**vision_config)
294
+
295
+ self.latent_query_num = latent_query_num
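
A hedged sketch of how the composition above behaves: partial override dicts are promoted to full sub-config objects while unspecified fields keep their defaults (the tiny sizes are for illustration only):

```python
from transformers import Kosmos2Config

config = Kosmos2Config(
    text_config={"layers": 2, "embed_dim": 256, "ffn_dim": 512, "attention_heads": 4},
    vision_config={"num_hidden_layers": 2, "hidden_size": 128, "num_attention_heads": 4},
    latent_query_num=16,
)

print(type(config.text_config).__name__)  # Kosmos2TextConfig
print(config.text_config.layers)          # 2 (override); other fields keep defaults
print(config.vision_config.hidden_size)   # 128
```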
venv/lib/python3.10/site-packages/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,77 @@
1
+ import argparse
2
+
3
+ from fairseq.checkpoint_utils import load_checkpoint_to_cpu
4
+
5
+ from transformers import Kosmos2Config, Kosmos2ForConditionalGeneration
6
+
7
+
8
+ KEYS_TO_MODIFY_MAPPING = {
9
+ "gpt_model.decoder.output_projection": "text_model.lm_head",
10
+ "gpt_model.decoder": "text_model.model",
11
+ "img_connector": "image_to_text_projection",
12
+ "img_model.visual.class_embedding": "vision_model.model.embeddings.class_embedding",
13
+ "img_model.visual.positional_embedding": "vision_model.model.embeddings.position_embedding.weight",
14
+ "img_model.visual.conv1": "vision_model.model.embeddings.patch_embedding",
15
+ "img_model.visual": "vision_model.model",
16
+ "ln_pre": "pre_layrnorm",
17
+ "ln_post": "post_layernorm",
18
+ "transformer.resblocks": "encoder.layers",
19
+ "ts_attn": "self_attn",
20
+ "ln_1": "layer_norm1",
21
+ "ln_2": "layer_norm2",
22
+ "c_fc": "fc1",
23
+ "c_proj": "fc2",
24
+ }
25
+
26
+
27
+ KEYS_TO_IGNORE = [
28
+ # this buffer in the original code is only used to send weights to the desired device
29
+ "gpt_model.decoder.embed_positions._float_tensor",
30
+ # this weight is never used in the forward pass in the original KOSMOS-2
31
+ "gpt_model.decoder.self_attn_sope.scale",
32
+ ]
33
+
34
+
35
+ def rename_key(key):
36
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
37
+ if key_to_modify in key:
38
+ key = key.replace(key_to_modify, new_key)
39
+
40
+ return key
41
+
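
One original checkpoint key traced through `rename_key` above; note that the mapping is applied by simple substring replacement, in dict order:

```python
# For the key below, "img_model.visual", "transformer.resblocks" and "ln_1" all fire.
key = "img_model.visual.transformer.resblocks.0.ln_1.weight"
print(rename_key(key))
# vision_model.model.encoder.layers.0.layer_norm1.weight
```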
42
+
43
+ def convert_kosmos2_checkpoint_to_pytorch(checkpoint_path, pytorch_dump_folder_path):
44
+ state = load_checkpoint_to_cpu(checkpoint_path)
45
+ state_dict = state["model"]
46
+ state_dict_keys = list(state_dict.keys())
47
+
48
+ config = Kosmos2Config()
49
+ # This is necessary to match the results given by the original demo
50
+ config.text_config.no_repeat_ngram_size = 3
51
+ model = Kosmos2ForConditionalGeneration(config)
52
+
53
+ # convert (by renaming keys)
54
+ converted_state_dict = {}
55
+ for key in state_dict_keys:
56
+ if key in KEYS_TO_IGNORE:
57
+ continue
58
+ renamed_key = rename_key(key)
59
+ converted_state_dict[renamed_key] = state_dict[key]
60
+
61
+ # check weight loading
62
+ model.load_state_dict(converted_state_dict, strict=True)
63
+ # save the result
64
+ model.save_pretrained(pytorch_dump_folder_path)
65
+
66
+
67
+ if __name__ == "__main__":
68
+ parser = argparse.ArgumentParser()
69
+ # Required parameters
70
+ parser.add_argument(
71
+ "--kosmos2_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
72
+ )
73
+ parser.add_argument(
74
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
75
+ )
76
+ args = parser.parse_args()
77
+ convert_kosmos2_checkpoint_to_pytorch(args.kosmos2_checkpoint_path, args.pytorch_dump_folder_path)
venv/lib/python3.10/site-packages/transformers/models/kosmos2/modeling_kosmos2.py ADDED
@@ -0,0 +1,2054 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch KOSMOS-2 model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPooling,
32
+ CausalLMOutputWithCrossAttentions,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_kosmos2 import Kosmos2Config, Kosmos2TextConfig, Kosmos2VisionConfig
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = Kosmos2Config
48
+
49
+
50
+ from ..deprecated._archive_maps import KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
54
+ """
55
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
56
+ """
57
+ bsz, src_len = mask.size()
58
+ tgt_len = tgt_len if tgt_len is not None else src_len
59
+
60
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
61
+
62
+ inverted_mask = 1.0 - expanded_mask
63
+
64
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
65
+
66
+
67
+ def _make_causal_mask(
68
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
69
+ ):
70
+ """
71
+ Make causal mask used for bi-directional self-attention.
72
+ """
73
+ bsz, tgt_len = input_ids_shape
74
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
75
+ mask_cond = torch.arange(mask.size(-1), device=device)
76
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
77
+ mask = mask.to(dtype)
78
+
79
+ if past_key_values_length > 0:
80
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
81
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
82
+
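
A worked example of `_make_causal_mask` for a batch of one and `tgt_len=3` with no cache: zeros mark allowed positions, everything above the diagonal is filled with the dtype minimum:

```python
import torch

# Returned shape is (bsz, 1, tgt_len, tgt_len).
mask = _make_causal_mask(torch.Size([1, 3]), torch.float32, torch.device("cpu"))
print((mask == 0).squeeze())
# tensor([[ True, False, False],
#         [ True,  True, False],
#         [ True,  True,  True]])
```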
83
+
84
+ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
85
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
86
+ """
87
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
88
+ are ignored. This is modified from fairseq's `utils.make_positions`.
89
+
90
+ Args:
91
+ input_ids: torch.Tensor
92
+
93
+ Returns: torch.Tensor
94
+ """
95
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
96
+ mask = input_ids.ne(padding_idx).int()
97
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
98
+ return incremental_indices.long() + padding_idx
99
+
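+ # For example, with `padding_idx=1` and `input_ids=[[5, 7, 1]]`, the cumulative sum over the
+ # non-padding mask gives `[[1, 2, 0]]`, so the returned position ids are `[[2, 3, 1]]`:
+ # positions start at `padding_idx + 1` and padded slots keep `padding_idx` itself.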
100
+
101
+ KOSMOS2_START_DOCSTRING = r"""
102
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
103
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
104
+ etc.)
105
+
106
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
107
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
108
+ and behavior.
109
+
110
+ Parameters:
111
+ config ([`Kosmos2Config`]): Model configuration class with all the parameters of the model.
112
+ Initializing with a config file does not load the weights associated with the model, only the
113
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
114
+ """
115
+
116
+ KOSMOS2_VISION_INPUTS_DOCSTRING = r"""
117
+ Args:
118
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
119
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
120
+ [`CLIPImageProcessor.__call__`] for details.
121
+ output_attentions (`bool`, *optional*):
122
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
123
+ tensors for more detail.
124
+ output_hidden_states (`bool`, *optional*):
125
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
126
+ more detail.
127
+ return_dict (`bool`, *optional*):
128
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
129
+ """
130
+
131
+ KOSMOS2_TEXT_INPUTS_DOCSTRING = r"""
132
+ Args:
133
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
134
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
135
+ it.
136
+
137
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
138
+ [`PreTrainedTokenizer.__call__`] for details.
139
+
140
+ [What are input IDs?](../glossary#input-ids)
141
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
142
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
143
+
144
+ - 1 for tokens that are **not masked**,
145
+ - 0 for tokens that are **masked**.
146
+
147
+ [What are attention masks?](../glossary#attention-mask)
148
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
149
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
150
+ image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
151
+ Mask to indicate the location in a sequence to insert the image features. Mask values selected in `[0,
152
+ 1]`:
153
+
154
+ - 1 for places where to put the image features,
155
+ - 0 for places that are not for image features (i.e. for text tokens).
156
+
157
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
158
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
159
+ the model is configured as a decoder.
160
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
161
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
162
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
163
+
164
+ - 1 for tokens that are **not masked**,
165
+ - 0 for tokens that are **masked**.
166
+
167
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
168
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
169
+
170
+ - 1 indicates the head is **not masked**,
171
+ - 0 indicates the head is **masked**.
172
+
173
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
174
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
175
+
176
+ - 1 indicates the head is **not masked**,
177
+ - 0 indicates the head is **masked**.
178
+
179
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
180
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
181
+
182
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
183
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
184
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
185
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
186
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
187
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
188
+ model's internal embedding lookup matrix.
189
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
190
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
191
+ config.max_position_embeddings - 1]`.
192
+
193
+ [What are position IDs?](../glossary#position-ids)
194
+ use_cache (`bool`, *optional*):
195
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
196
+ `past_key_values`).
197
+ output_attentions (`bool`, *optional*):
198
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
199
+ tensors for more detail.
200
+ output_hidden_states (`bool`, *optional*):
201
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
202
+ more detail.
203
+ return_dict (`bool`, *optional*):
204
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
205
+ """
206
+
207
+ KOSMOS2_INPUTS_DOCSTRING = r"""
208
+ Args:
209
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
210
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
211
+ [`CLIPImageProcessor.__call__`] for details.
212
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
213
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
214
+ it.
215
+
216
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
217
+ [`PreTrainedTokenizer.__call__`] for details.
218
+
219
+ [What are input IDs?](../glossary#input-ids)
220
+ image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
221
+ Mask to indicate the location in a sequence to insert the image features. Mask values selected in `[0,
222
+ 1]`:
223
+
224
+ - 1 for places where to put the image features,
225
+ - 0 for places that are not for image features (i.e. for text tokens).
226
+
227
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
228
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
229
+
230
+ - 1 for tokens that are **not masked**,
231
+ - 0 for tokens that are **masked**.
232
+
233
+ [What are attention masks?](../glossary#attention-mask)
234
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
235
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
236
+
237
+ - 1 indicates the head is **not masked**,
238
+ - 0 indicates the head is **masked**.
239
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
240
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
241
+
242
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
243
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
244
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
245
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
246
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
247
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
248
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
249
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
250
+ model's internal embedding lookup matrix.
251
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
252
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
253
+ config.max_position_embeddings - 1]`.
254
+
255
+ [What are position IDs?](../glossary#position-ids)
256
+ use_cache (`bool`, *optional*):
257
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
258
+ `past_key_values`).
259
+ output_attentions (`bool`, *optional*):
260
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
261
+ tensors for more detail.
262
+ output_hidden_states (`bool`, *optional*):
263
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
264
+ more detail.
265
+ return_dict (`bool`, *optional*):
266
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
267
+ """
268
+
269
+
270
+ @dataclass
271
+ class Kosmos2ModelOutput(ModelOutput):
272
+ """
273
+ Base class for [`Kosmos2Model`] outputs that also contains the image embeddings and the pooled vision model output.
274
+
275
+ Args:
276
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
277
+ Sequence of hidden-states at the output of the last layer of the model.
278
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
279
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
280
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
281
+
282
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
283
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
284
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
285
+ sequence_length)`.
286
+
287
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
288
+ heads.
289
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
290
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
291
+ projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
292
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
293
+ sequence_length)`.
294
+
295
+ Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
296
+ the weighted average in the self-attention heads.
297
+ vision_model_output (`BaseModelOutputWithPooling`, *optional*):
298
+ The output of the [`Kosmos2VisionModel`].
299
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
300
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
301
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
302
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
303
+ encoder_sequence_length, embed_size_per_head)`.
304
+
305
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
306
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
307
+ input) to speed up sequential decoding.
308
+ """
309
+
310
+ last_hidden_state: torch.FloatTensor = None
311
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
312
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
313
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
314
+ image_embeds: Optional[torch.FloatTensor] = None
315
+ projection_attentions: Optional[Tuple[torch.FloatTensor]] = None
316
+ vision_model_output: BaseModelOutputWithPooling = None
317
+
318
+ def to_tuple(self) -> Tuple[Any]:
319
+ return tuple(
320
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
321
+ for k in self.keys()
322
+ )
323
+
324
+
325
+ @dataclass
326
+ class Kosmos2ForConditionalGenerationModelOutput(ModelOutput):
327
+ """
328
+ Model output class for `Kosmos2ForConditionalGeneration`.
329
+
330
+ Args:
331
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
332
+ Language modeling loss (for next-token prediction).
333
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
334
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
335
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
336
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
337
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
338
+
339
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
340
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
341
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
342
+ sequence_length)`.
343
+
344
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
345
+ heads.
346
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
347
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
348
+ projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
349
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
350
+ sequence_length)`.
351
+
352
+ Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
353
+ the weighted average in the self-attention heads.
354
+ vision_model_output (`BaseModelOutputWithPooling`, *optional*):
355
+ The output of the [`Kosmos2VisionModel`].
356
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
357
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
358
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
359
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
360
+ encoder_sequence_length, embed_size_per_head)`.
361
+
362
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
363
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
364
+ input) to speed up sequential decoding.
365
+ """
366
+
367
+ loss: Optional[torch.FloatTensor] = None
368
+ logits: torch.FloatTensor = None
369
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
370
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
371
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
372
+ image_embeds: Optional[torch.FloatTensor] = None
373
+ projection_attentions: Optional[Tuple[torch.FloatTensor]] = None
374
+ vision_model_output: BaseModelOutputWithPooling = None
375
+
376
+ def to_tuple(self) -> Tuple[Any]:
377
+ return tuple(
378
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
379
+ for k in self.keys()
380
+ )
381
+
382
+
383
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Kosmos2
384
+ class Kosmos2VisionEmbeddings(nn.Module):
385
+ def __init__(self, config: Kosmos2VisionConfig):
386
+ super().__init__()
387
+ self.config = config
388
+ self.embed_dim = config.hidden_size
389
+ self.image_size = config.image_size
390
+ self.patch_size = config.patch_size
391
+
392
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
393
+
394
+ self.patch_embedding = nn.Conv2d(
395
+ in_channels=config.num_channels,
396
+ out_channels=self.embed_dim,
397
+ kernel_size=self.patch_size,
398
+ stride=self.patch_size,
399
+ bias=False,
400
+ )
401
+
402
+ self.num_patches = (self.image_size // self.patch_size) ** 2
403
+ self.num_positions = self.num_patches + 1
404
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
405
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
406
+
407
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
408
+ batch_size = pixel_values.shape[0]
409
+ target_dtype = self.patch_embedding.weight.dtype
410
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
411
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
412
+
413
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
414
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
415
+ embeddings = embeddings + self.position_embedding(self.position_ids)
416
+ return embeddings
417
+
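+ # Shape walk-through: e.g. with `image_size=224` and `patch_size=14`, the conv produces a
+ # 16 x 16 grid of patch embeddings (256 tokens); prepending the class token yields
+ # embeddings of shape `(batch_size, 257, embed_dim)` before position embeddings are added.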
418
+
419
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->Kosmos2Vision
420
+ class Kosmos2VisionAttention(nn.Module):
421
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
422
+
423
+ def __init__(self, config):
424
+ super().__init__()
425
+ self.config = config
426
+ self.embed_dim = config.hidden_size
427
+ self.num_heads = config.num_attention_heads
428
+ self.head_dim = self.embed_dim // self.num_heads
429
+ if self.head_dim * self.num_heads != self.embed_dim:
430
+ raise ValueError(
431
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
432
+ f" {self.num_heads})."
433
+ )
434
+ self.scale = self.head_dim**-0.5
435
+ self.dropout = config.attention_dropout
436
+
437
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
438
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
439
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
440
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
441
+
442
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
443
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
444
+
445
+ def forward(
446
+ self,
447
+ hidden_states: torch.Tensor,
448
+ attention_mask: Optional[torch.Tensor] = None,
449
+ causal_attention_mask: Optional[torch.Tensor] = None,
450
+ output_attentions: Optional[bool] = False,
451
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
452
+ """Input shape: Batch x Time x Channel"""
453
+
454
+ bsz, tgt_len, embed_dim = hidden_states.size()
455
+
456
+ # get query proj
457
+ query_states = self.q_proj(hidden_states) * self.scale
458
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
459
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
460
+
461
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
462
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
463
+ key_states = key_states.view(*proj_shape)
464
+ value_states = value_states.view(*proj_shape)
465
+
466
+ src_len = key_states.size(1)
467
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
468
+
469
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
470
+ raise ValueError(
471
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
472
+ f" {attn_weights.size()}"
473
+ )
474
+
475
+ # apply the causal_attention_mask first
476
+ if causal_attention_mask is not None:
477
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
478
+ raise ValueError(
479
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
480
+ f" {causal_attention_mask.size()}"
481
+ )
482
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
483
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
484
+
485
+ if attention_mask is not None:
486
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
487
+ raise ValueError(
488
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
489
+ )
490
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
491
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
492
+
493
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
494
+
495
+ if output_attentions:
496
+ # this operation is a bit awkward, but it's required to
497
+ # make sure that attn_weights keeps its gradient.
498
+ # In order to do so, attn_weights have to be reshaped
499
+ # twice and have to be reused in the following computation
500
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
501
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
502
+ else:
503
+ attn_weights_reshaped = None
504
+
505
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
506
+
507
+ attn_output = torch.bmm(attn_probs, value_states)
508
+
509
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
510
+ raise ValueError(
511
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
512
+ f" {attn_output.size()}"
513
+ )
514
+
515
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
516
+ attn_output = attn_output.transpose(1, 2)
517
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
518
+
519
+ attn_output = self.out_proj(attn_output)
520
+
521
+ return attn_output, attn_weights_reshaped
522
+
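+ # Shape summary for the block above: queries/keys/values are reshaped to
+ # `(bsz * num_heads, seq_len, head_dim)`, so `torch.bmm` yields attention weights of shape
+ # `(bsz * num_heads, tgt_len, src_len)` and an output reassembled to `(bsz, tgt_len, embed_dim)`.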
523
+
524
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Kosmos2Vision
525
+ class Kosmos2VisionMLP(nn.Module):
526
+ def __init__(self, config):
527
+ super().__init__()
528
+ self.config = config
529
+ self.activation_fn = ACT2FN[config.hidden_act]
530
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
531
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
532
+
533
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
534
+ hidden_states = self.fc1(hidden_states)
535
+ hidden_states = self.activation_fn(hidden_states)
536
+ hidden_states = self.fc2(hidden_states)
537
+ return hidden_states
538
+
539
+
540
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Kosmos2Vision
541
+ class Kosmos2VisionEncoderLayer(nn.Module):
542
+ def __init__(self, config: Kosmos2VisionConfig):
543
+ super().__init__()
544
+ self.embed_dim = config.hidden_size
545
+ self.self_attn = Kosmos2VisionAttention(config)
546
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
547
+ self.mlp = Kosmos2VisionMLP(config)
548
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
549
+
550
+ def forward(
551
+ self,
552
+ hidden_states: torch.Tensor,
553
+ attention_mask: torch.Tensor,
554
+ causal_attention_mask: torch.Tensor,
555
+ output_attentions: Optional[bool] = False,
556
+ ) -> Tuple[torch.FloatTensor]:
557
+ """
558
+ Args:
559
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
560
+ attention_mask (`torch.FloatTensor`): attention mask of size
561
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
562
+ causal_attention_mask (`torch.FloatTensor`): causal attention mask of size `(batch, 1, tgt_len, src_len)`.
563
+ output_attentions (`bool`, *optional*):
564
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
565
+ returned tensors for more detail.
566
+ """
567
+ residual = hidden_states
568
+
569
+ hidden_states = self.layer_norm1(hidden_states)
570
+ hidden_states, attn_weights = self.self_attn(
571
+ hidden_states=hidden_states,
572
+ attention_mask=attention_mask,
573
+ causal_attention_mask=causal_attention_mask,
574
+ output_attentions=output_attentions,
575
+ )
576
+ hidden_states = residual + hidden_states
577
+
578
+ residual = hidden_states
579
+ hidden_states = self.layer_norm2(hidden_states)
580
+ hidden_states = self.mlp(hidden_states)
581
+ hidden_states = residual + hidden_states
582
+
583
+ outputs = (hidden_states,)
584
+
585
+ if output_attentions:
586
+ outputs += (attn_weights,)
587
+
588
+ return outputs
589
+
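+ # Pre-LayerNorm residual layout: hidden = hidden + Attn(LayerNorm1(hidden)),
+ # then hidden = hidden + MLP(LayerNorm2(hidden)).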
590
+
591
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Kosmos2Vision
592
+ class Kosmos2VisionEncoder(nn.Module):
593
+ """
594
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
595
+ [`Kosmos2VisionEncoderLayer`].
596
+
597
+ Args:
598
+ config: Kosmos2VisionConfig
599
+ """
600
+
601
+ def __init__(self, config: Kosmos2VisionConfig):
602
+ super().__init__()
603
+ self.config = config
604
+ self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
605
+ self.gradient_checkpointing = False
606
+
607
+ def forward(
608
+ self,
609
+ inputs_embeds,
610
+ attention_mask: Optional[torch.Tensor] = None,
611
+ causal_attention_mask: Optional[torch.Tensor] = None,
612
+ output_attentions: Optional[bool] = None,
613
+ output_hidden_states: Optional[bool] = None,
614
+ return_dict: Optional[bool] = None,
615
+ ) -> Union[Tuple, BaseModelOutput]:
616
+ r"""
617
+ Args:
618
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
619
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
620
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
621
+ than the model's internal embedding lookup matrix.
622
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
623
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
624
+
625
+ - 1 for tokens that are **not masked**,
626
+ - 0 for tokens that are **masked**.
627
+
628
+ [What are attention masks?](../glossary#attention-mask)
629
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
630
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
631
+
632
+ - 1 for tokens that are **not masked**,
633
+ - 0 for tokens that are **masked**.
634
+
635
+ [What are attention masks?](../glossary#attention-mask)
636
+ output_attentions (`bool`, *optional*):
637
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
638
+ returned tensors for more detail.
639
+ output_hidden_states (`bool`, *optional*):
640
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
641
+ for more detail.
642
+ return_dict (`bool`, *optional*):
643
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
644
+ """
645
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
646
+ output_hidden_states = (
647
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
648
+ )
649
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
650
+
651
+ encoder_states = () if output_hidden_states else None
652
+ all_attentions = () if output_attentions else None
653
+
654
+ hidden_states = inputs_embeds
655
+ for idx, encoder_layer in enumerate(self.layers):
656
+ if output_hidden_states:
657
+ encoder_states = encoder_states + (hidden_states,)
658
+ if self.gradient_checkpointing and self.training:
659
+ layer_outputs = self._gradient_checkpointing_func(
660
+ encoder_layer.__call__,
661
+ hidden_states,
662
+ attention_mask,
663
+ causal_attention_mask,
664
+ output_attentions,
665
+ )
666
+ else:
667
+ layer_outputs = encoder_layer(
668
+ hidden_states,
669
+ attention_mask,
670
+ causal_attention_mask,
671
+ output_attentions=output_attentions,
672
+ )
673
+
674
+ hidden_states = layer_outputs[0]
675
+
676
+ if output_attentions:
677
+ all_attentions = all_attentions + (layer_outputs[1],)
678
+
679
+ if output_hidden_states:
680
+ encoder_states = encoder_states + (hidden_states,)
681
+
682
+ if not return_dict:
683
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
684
+ return BaseModelOutput(
685
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
686
+ )
687
+
688
+
689
+ # Similar to `transformers.models.clip.modeling_clip.CLIPVisionTransformer` but without docstring for `forward`
690
+ class Kosmos2VisionTransformer(nn.Module):
691
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPVision->Kosmos2Vision,CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2Vision
692
+ def __init__(self, config: Kosmos2VisionConfig):
693
+ super().__init__()
694
+ self.config = config
695
+ embed_dim = config.hidden_size
696
+
697
+ self.embeddings = Kosmos2VisionEmbeddings(config)
698
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
699
+ self.encoder = Kosmos2VisionEncoder(config)
700
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
701
+
702
+ def forward(
703
+ self,
704
+ pixel_values: Optional[torch.FloatTensor] = None,
705
+ output_attentions: Optional[bool] = None,
706
+ output_hidden_states: Optional[bool] = None,
707
+ return_dict: Optional[bool] = None,
708
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
709
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
710
+ output_hidden_states = (
711
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
712
+ )
713
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
714
+
715
+ if pixel_values is None:
716
+ raise ValueError("You have to specify pixel_values")
717
+
718
+ hidden_states = self.embeddings(pixel_values)
719
+ hidden_states = self.pre_layrnorm(hidden_states)
720
+
721
+ encoder_outputs = self.encoder(
722
+ inputs_embeds=hidden_states,
723
+ output_attentions=output_attentions,
724
+ output_hidden_states=output_hidden_states,
725
+ return_dict=return_dict,
726
+ )
727
+
728
+ last_hidden_state = encoder_outputs[0]
729
+ pooled_output = last_hidden_state[:, 0, :]
730
+ pooled_output = self.post_layernorm(pooled_output)
731
+
732
+ if not return_dict:
733
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
734
+
735
+ return BaseModelOutputWithPooling(
736
+ last_hidden_state=last_hidden_state,
737
+ pooler_output=pooled_output,
738
+ hidden_states=encoder_outputs.hidden_states,
739
+ attentions=encoder_outputs.attentions,
740
+ )
741
+
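+ # The pooled output is simply the post-LayerNorm embedding of the class token at index 0,
+ # mirroring the CLIP vision tower this module is adapted from.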
742
+
743
+ # Similar to `transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding` but allowing to pass `position_ids`
744
+ class Kosmos2TextSinusoidalPositionalEmbedding(nn.Module):
745
+ """This module produces sinusoidal positional embeddings of any length."""
746
+
747
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__
748
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
749
+ super().__init__()
750
+ self.offset = 2
751
+ self.embedding_dim = embedding_dim
752
+ self.padding_idx = padding_idx
753
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
754
+
755
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights
756
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
757
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
758
+ if hasattr(self, "weights"):
759
+ # in forward put the weights on the correct dtype and device of the param
760
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
761
+
762
+ self.register_buffer("weights", emb_weights, persistent=False)
763
+
764
+ @staticmethod
765
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding
766
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
767
+ """
768
+ Build sinusoidal embeddings.
769
+
770
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
771
+ "Attention Is All You Need".
772
+ """
773
+ half_dim = embedding_dim // 2
774
+ emb = math.log(10000) / (half_dim - 1)
775
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
776
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
777
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
778
+ if embedding_dim % 2 == 1:
779
+ # zero pad
780
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
781
+ if padding_idx is not None:
782
+ emb[padding_idx, :] = 0
783
+
784
+ return emb.to(torch.get_default_dtype())
785
+
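+ # Each row `pos` holds `sin(pos * freq_i)` in the first `half_dim` columns and
+ # `cos(pos * freq_i)` in the rest, with `freq_i = exp(-i * log(10000) / (half_dim - 1))`;
+ # an odd `embedding_dim` is handled by zero-padding the last column.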
786
+ @torch.no_grad()
787
+ def forward(
788
+ self,
789
+ input_ids: torch.Tensor = None,
790
+ inputs_embeds: torch.Tensor = None,
791
+ past_key_values_length: int = 0,
792
+ position_ids: torch.Tensor = None,
793
+ ):
794
+ if input_ids is not None:
795
+ bsz, seq_len = input_ids.size()
796
+ if position_ids is None:
797
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
798
+ position_ids = create_position_ids_from_input_ids(
799
+ input_ids, self.padding_idx, past_key_values_length
800
+ ).to(input_ids.device)
801
+ else:
802
+ bsz, seq_len = inputs_embeds.size()[:-1]
803
+ if position_ids is None:
804
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
805
+
806
+ # expand embeddings if needed
807
+ max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
808
+ if max_pos > self.weights.size(0):
809
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
810
+
811
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
812
+
813
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds
814
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
815
+ """
816
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
817
+
818
+ Args:
819
+ inputs_embeds: torch.Tensor
820
+
821
+ Returns: torch.Tensor
822
+ """
823
+ input_shape = inputs_embeds.size()[:-1]
824
+ sequence_length = input_shape[1]
825
+
826
+ position_ids = torch.arange(
827
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
828
+ )
829
+ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
830
+
831
+
832
+ class KosmosTextAttention(nn.Module):
833
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
834
+
835
+ # Similar to transformers.models.bart.modeling_bart.BartAttention.__init__ except for an additional `inner_attn_ln`.
836
+ def __init__(
837
+ self,
838
+ config,
839
+ embed_dim: int,
840
+ num_heads: int,
841
+ dropout: float = 0.0,
842
+ is_decoder: bool = False,
843
+ add_inner_attn_layernorm: bool = False,
844
+ bias: bool = True,
845
+ ):
846
+ super().__init__()
847
+ self.embed_dim = embed_dim
848
+ self.num_heads = num_heads
849
+ self.dropout = dropout
850
+ self.head_dim = embed_dim // num_heads
851
+
852
+ if (self.head_dim * num_heads) != self.embed_dim:
853
+ raise ValueError(
854
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
855
+ f" and `num_heads`: {num_heads})."
856
+ )
857
+ self.scaling = self.head_dim**-0.5
858
+ self.is_decoder = is_decoder
859
+
860
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
861
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
862
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
863
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
864
+
865
+ # End copy
866
+ self.inner_attn_ln = None
867
+ if add_inner_attn_layernorm:
868
+ self.inner_attn_ln = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
869
+
870
+ def _shape(self, projection: torch.Tensor) -> torch.Tensor:
871
+ new_projection_shape = projection.size()[:-1] + (self.num_heads, self.head_dim)
872
+ # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D)
873
+ new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3)
874
+ return new_projection
875
+
876
+ def forward(
877
+ self,
878
+ hidden_states: torch.Tensor,
879
+ encoder_hidden_states: Optional[torch.Tensor] = None,
880
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ layer_head_mask: Optional[torch.Tensor] = None,
883
+ output_attentions: bool = False,
884
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
885
+ """Input shape: Batch x Time x Channel"""
886
+
887
+ # if key_value_states are provided this layer is used as a cross-attention layer
888
+ # for the decoder
889
+ is_cross_attention = encoder_hidden_states is not None
890
+ batch_size, seq_length = hidden_states.shape[:2]
891
+
892
+ # use encoder_hidden_states if cross attention
893
+ current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
894
+ # check that the `sequence_length` of the cached `past_key_value` is the same as that of the provided
895
+ # `encoder_hidden_states` to support prefix tuning
896
+ if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]:
897
+ # reuse k,v, cross_attentions
898
+ key_states = past_key_value[0]
899
+ value_states = past_key_value[1]
900
+ else:
901
+ key_states = self._shape(self.k_proj(current_states))
902
+ value_states = self._shape(self.v_proj(current_states))
903
+ if past_key_value is not None and not is_cross_attention:
904
+ # reuse k, v, self_attention
905
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
906
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
907
+
908
+ query_states = self._shape(self.q_proj(hidden_states) * self.scaling)
909
+ attn_weights = torch.matmul(query_states, key_states.transpose(-1, -2))
910
+
911
+ if self.is_decoder:
912
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
913
+ # Further calls to cross_attention layer can then reuse all cross-attention
914
+ # key/value_states (first "if" case)
915
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
916
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
917
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
918
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
919
+ past_key_value = (key_states, value_states)
920
+
921
+ src_len = key_states.size(2)
922
+
923
+ if attention_mask is not None:
924
+ if attention_mask.size() != (batch_size, 1, seq_length, src_len):
925
+ raise ValueError(
926
+ f"Attention mask should be of size {(batch_size, 1, seq_length, src_len)}, but is {attention_mask.size()}"
927
+ )
928
+ attn_weights = attn_weights + attention_mask
929
+
930
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
931
+
932
+ # Mask heads if we want to
933
+ if layer_head_mask is not None:
934
+ attn_weights = attn_weights * layer_head_mask
935
+
936
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
937
+
938
+ # matmul over the (batch, heads) dims is equivalent to BART's `torch.bmm(attn_probs, value_states)`
939
+ context_states = torch.matmul(attn_weights, value_states)
940
+ # merge the heads back: (B, H, T, D) -> (B, T, H, D) -> (B, T, H * D)
941
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
942
+
943
+ if self.inner_attn_ln is not None:
944
+ context_states = self.inner_attn_ln(context_states)
945
+
946
+ attn_output = self.out_proj(context_states)
947
+
948
+ return attn_output, attn_weights, past_key_value
949
+
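+ # Cache layout: for self-attention, past key/value states are concatenated with the new
+ # projections along the time dimension; for cross-attention, cached states are reused as-is
+ # whenever their sequence length still matches the provided `encoder_hidden_states`.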
950
+
951
+ class Kosmos2TextFFN(nn.Module):
952
+ def __init__(self, config: Kosmos2TextConfig):
953
+ super().__init__()
954
+
955
+ self.dropout = config.dropout
956
+ self.activation_fn = ACT2FN[config.activation_function]
957
+ self.activation_dropout = config.activation_dropout
958
+
959
+ self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim)
960
+ self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim)
961
+
962
+ self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps)
963
+
964
+ def forward(self, hidden_states):
965
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
966
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
967
+ hidden_states = self.ffn_layernorm(hidden_states)
968
+ hidden_states = self.fc2(hidden_states)
969
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
970
+
971
+ return hidden_states
972
+
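+ # Unlike a vanilla transformer FFN, this block applies an extra LayerNorm (`ffn_layernorm`)
+ # between the activation and the second projection, i.e. fc2(LN(act(fc1(x)))), with dropout
+ # after both the activation and the output.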
973
+
974
+ class Kosmos2TextBlock(nn.Module):
975
+ def __init__(self, config: Kosmos2TextConfig):
976
+ super().__init__()
977
+ self.embed_dim = config.embed_dim
978
+
979
+ self.self_attn = KosmosTextAttention(
980
+ config,
981
+ embed_dim=self.embed_dim,
982
+ num_heads=config.attention_heads,
983
+ dropout=config.attention_dropout,
984
+ is_decoder=True,
985
+ add_inner_attn_layernorm=True,
986
+ )
987
+ self.dropout = config.dropout
988
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
989
+
990
+ if config.add_cross_attention:
991
+ self.encoder_attn = KosmosTextAttention(
992
+ config,
993
+ embed_dim=self.embed_dim,
994
+ num_heads=config.attention_heads,
995
+ dropout=config.attention_dropout,
996
+ is_decoder=True,
997
+ add_inner_attn_layernorm=False,
998
+ )
999
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
1000
+
1001
+ self.ffn = Kosmos2TextFFN(config)
1002
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
1003
+
1004
+ def forward(
1005
+ self,
1006
+ hidden_states: torch.Tensor,
1007
+ attention_mask: Optional[torch.Tensor] = None,
1008
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1009
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1010
+ layer_head_mask: Optional[torch.Tensor] = None,
1011
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
1012
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1013
+ output_attentions: Optional[bool] = False,
1014
+ use_cache: Optional[bool] = True,
1015
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1016
+ residual = hidden_states
1017
+
1018
+ # Self Attention
1019
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
1020
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
1021
+
1022
+ hidden_states = self.self_attn_layer_norm(hidden_states)
1023
+
1024
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
1025
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1026
+ hidden_states=hidden_states,
1027
+ past_key_value=self_attn_past_key_value,
1028
+ attention_mask=attention_mask,
1029
+ layer_head_mask=layer_head_mask,
1030
+ output_attentions=output_attentions,
1031
+ )
1032
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1033
+ hidden_states = residual + hidden_states
1034
+
1035
+ # Cross-Attention Block
1036
+ cross_attn_present_key_value = None
1037
+ cross_attn_weights = None
1038
+ if encoder_hidden_states is not None:
1039
+ if not hasattr(self, "encoder_attn"):
1040
+ raise ValueError(
1041
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
1042
+ " by setting `config.add_cross_attention=True`"
1043
+ )
1044
+
1045
+ residual = hidden_states
1046
+
1047
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
1048
+
1049
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
1050
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
1051
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
1052
+ hidden_states=hidden_states,
1053
+ encoder_hidden_states=encoder_hidden_states,
1054
+ attention_mask=encoder_attention_mask,
1055
+ layer_head_mask=cross_attn_layer_head_mask,
1056
+ past_key_value=cross_attn_past_key_value,
1057
+ output_attentions=output_attentions,
1058
+ )
1059
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1060
+ hidden_states = residual + hidden_states
1061
+
1062
+ # add cross-attn to positions 3,4 of present_key_value tuple
1063
+ present_key_value = present_key_value + cross_attn_present_key_value
1064
+
1065
+ # Fully Connected
1066
+ residual = hidden_states
1067
+
1068
+ hidden_states = self.final_layer_norm(hidden_states)
1069
+
1070
+ # FFN
1071
+ hidden_states = self.ffn(hidden_states)
1072
+ hidden_states = residual + hidden_states
1073
+
1074
+ outputs = (hidden_states,)
1075
+
1076
+ if output_attentions:
1077
+ outputs += (self_attn_weights, cross_attn_weights)
1078
+
1079
+ if use_cache:
1080
+ outputs += (present_key_value,)
1081
+
1082
+ return outputs
1083
+
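+ # Output tuple layout: `(hidden_states,)` plus, when requested,
+ # `(self_attn_weights, cross_attn_weights)` and finally `(present_key_value,)` when
+ # `use_cache=True`; the first two entries of `present_key_value` are the self-attention
+ # key/values, the last two the cross-attention key/values.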
1084
+
1085
+ class Kosmos2TextTransformer(nn.Module):
1086
+ """
1087
+ Transformer decoder consisting of `config.layers` layers. Each layer is a [`Kosmos2TextBlock`].
1088
+
1089
+ Args:
1090
+ config: Kosmos2TextConfig
1091
+ """
1092
+
1093
+ def __init__(self, config: Kosmos2TextConfig):
1094
+ super().__init__()
1095
+ self.config = config
1096
+ self.dropout = config.dropout
1097
+ self.layerdrop = config.layerdrop
1098
+
1099
+ self.embed_scale = math.sqrt(config.embed_dim) if config.scale_embedding else 1.0
1100
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.embed_dim, padding_idx=config.pad_token_id)
1101
+
1102
+ self.embed_positions = Kosmos2TextSinusoidalPositionalEmbedding(
1103
+ num_positions=config.max_position_embeddings,
1104
+ embedding_dim=config.embed_dim,
1105
+ padding_idx=config.pad_token_id,
1106
+ )
1107
+
1108
+ self.layers = nn.ModuleList([Kosmos2TextBlock(config) for _ in range(config.layers)])
1109
+ self.layer_norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
1110
+
1111
+ self.gradient_checkpointing = False
1112
+
1113
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
1114
+ # create causal mask
1115
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1116
+ combined_attention_mask = None
1117
+ if input_shape[-1] > 1:
1118
+ combined_attention_mask = _make_causal_mask(
1119
+ input_shape,
1120
+ inputs_embeds.dtype,
1121
+ device=inputs_embeds.device,
1122
+ past_key_values_length=past_key_values_length,
1123
+ )
1124
+
1125
+ if attention_mask is not None:
1126
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1127
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
1128
+ inputs_embeds.device
1129
+ )
1130
+ combined_attention_mask = (
1131
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
1132
+ )
1133
+
1134
+ return combined_attention_mask
1135
+
1136
+ def forward_embedding(
1137
+ self,
1138
+ input_ids,
1139
+ inputs_embeds: torch.Tensor = None,
1140
+ image_embeds: torch.Tensor = None,
1141
+ img_input_mask: torch.Tensor = None,
1142
+ past_key_values_length: int = 0,
1143
+ position_ids: torch.Tensor = None,
1144
+ ):
1145
+ # The argument `inputs_embeds` is expected here *before* multiplication by `self.embed_scale`.
1146
+ if inputs_embeds is None:
1147
+ inputs_embeds = self.embed_tokens(input_ids)
1148
+
1149
+ if image_embeds is not None:
1150
+ inputs_embeds[img_input_mask.to(dtype=torch.bool)] = image_embeds.to(inputs_embeds.device).view(
1151
+ -1, image_embeds.size(-1)
1152
+ )
1153
+
1154
+ inputs_embeds = inputs_embeds * self.embed_scale
1155
+
1156
+ # embed positions
1157
+ positions = self.embed_positions(
1158
+ input_ids=input_ids,
1159
+ inputs_embeds=inputs_embeds,
1160
+ past_key_values_length=past_key_values_length,
1161
+ position_ids=position_ids,
1162
+ )
1163
+ positions = positions.to(inputs_embeds.device)
1164
+
1165
+ hidden_states = inputs_embeds + positions
1166
+
1167
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1168
+
1169
+ return hidden_states
1170
+
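+ # In effect: token embeddings are computed first, then the positions flagged by
+ # `img_input_mask` are overwritten in place with the projected image embeddings, and only
+ # afterwards is the result scaled by `embed_scale` and combined with position embeddings.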
1171
+ def forward(
1172
+ self,
1173
+ input_ids: Optional[torch.Tensor] = None,
1174
+ attention_mask: Optional[torch.Tensor] = None,
1175
+ image_embeds: Optional[torch.Tensor] = None,
1176
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1177
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1178
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1179
+ head_mask: Optional[torch.Tensor] = None,
1180
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1181
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1182
+ inputs_embeds: Optional[torch.Tensor] = None,
1183
+ position_ids: Optional[torch.Tensor] = None,
1184
+ use_cache: Optional[bool] = None,
1185
+ output_attentions: Optional[bool] = None,
1186
+ output_hidden_states: Optional[bool] = None,
1187
+ return_dict: Optional[bool] = None,
1188
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
1189
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1190
+ output_hidden_states = (
1191
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1192
+ )
1193
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1194
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1195
+
1196
+ if input_ids is not None and inputs_embeds is not None:
1197
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1198
+ elif input_ids is not None:
1199
+ input_shape = input_ids.shape
1200
+ input_ids = input_ids.view(-1, input_shape[-1])
1201
+ elif inputs_embeds is not None:
1202
+ input_shape = inputs_embeds.size()[:-1]
1203
+ else:
1204
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1205
+
1206
+ # past_key_values_length
1207
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1208
+
1209
+ # We don't need image info when `past_key_values_length` > 0
1210
+ if past_key_values_length > 0:
1211
+ image_embeds = None
1212
+ image_embeds_position_mask = None
1213
+
1214
+ hidden_states = self.forward_embedding(
1215
+ input_ids=input_ids,
1216
+ inputs_embeds=inputs_embeds,
1217
+ image_embeds=image_embeds,
1218
+ img_input_mask=image_embeds_position_mask,
1219
+ past_key_values_length=past_key_values_length,
1220
+ position_ids=position_ids,
1221
+ )
1222
+
1223
+ attention_mask = self._prepare_decoder_attention_mask(
1224
+ attention_mask, input_shape, hidden_states, past_key_values_length
1225
+ )
1226
+
1227
+ # expand encoder attention mask
1228
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1229
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1230
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
1231
+
1232
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1233
+
1234
+ if self.gradient_checkpointing and self.training:
1235
+ if use_cache:
1236
+ logger.warning_once(
1237
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1238
+ )
1239
+ use_cache = False
1240
+
1241
+ # decoder layers
1242
+ all_hidden_states = () if output_hidden_states else None
1243
+ all_self_attns = () if output_attentions else None
1244
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
1245
+ present_key_value_states = () if use_cache else None
1246
+
1247
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1248
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1249
+ if attn_mask is not None:
1250
+ if attn_mask.size()[0] != len(self.layers):
1251
+ raise ValueError(
1252
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1253
+ f" {attn_mask.size()[0]}."
1254
+ )
1255
+
1256
+ for idx, decoder_layer in enumerate(self.layers):
1257
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1258
+ if output_hidden_states:
1259
+ all_hidden_states += (hidden_states,)
1260
+ if self.training:
1261
+ dropout_probability = torch.rand([])
1262
+ if dropout_probability < self.layerdrop:
1263
+ continue
1264
+
1265
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1266
+
1267
+ if self.gradient_checkpointing and self.training:
1268
+ layer_outputs = self._gradient_checkpointing_func(
1269
+ decoder_layer.__call__,
1270
+ hidden_states,
1271
+ attention_mask,
1272
+ encoder_hidden_states,
1273
+ encoder_attention_mask,
1274
+ head_mask[idx] if head_mask is not None else None,
1275
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1276
+ None,
1277
+ output_attentions,
1278
+ use_cache,
1279
+ )
1280
+ else:
1281
+ layer_outputs = decoder_layer(
1282
+ hidden_states,
1283
+ attention_mask=attention_mask,
1284
+ encoder_hidden_states=encoder_hidden_states,
1285
+ encoder_attention_mask=encoder_attention_mask,
1286
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1287
+ cross_attn_layer_head_mask=(
1288
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1289
+ ),
1290
+ past_key_value=past_key_value,
1291
+ output_attentions=output_attentions,
1292
+ use_cache=use_cache,
1293
+ )
1294
+ hidden_states = layer_outputs[0]
1295
+
1296
+ if use_cache:
1297
+ present_key_value_states += (layer_outputs[3 if output_attentions else 1],)
1298
+
1299
+ if output_attentions:
1300
+ all_self_attns += (layer_outputs[1],)
1301
+
1302
+ if encoder_hidden_states is not None:
1303
+ all_cross_attentions += (layer_outputs[2],)
1304
+
1305
+ # add final layer norm
1306
+ hidden_states = self.layer_norm(hidden_states)
1307
+
1308
+ # add hidden states from the last decoder layer
1309
+ if output_hidden_states:
1310
+ all_hidden_states += (hidden_states,)
1311
+
1312
+ if not return_dict:
1313
+ return tuple(
1314
+ v
1315
+ for v in [
1316
+ hidden_states,
1317
+ present_key_value_states,
1318
+ all_hidden_states,
1319
+ all_self_attns,
1320
+ all_cross_attentions,
1321
+ ]
1322
+ if v is not None
1323
+ )
1324
+ return BaseModelOutputWithPastAndCrossAttentions(
1325
+ last_hidden_state=hidden_states,
1326
+ past_key_values=present_key_value_states,
1327
+ hidden_states=all_hidden_states,
1328
+ attentions=all_self_attns,
1329
+ cross_attentions=all_cross_attentions,
1330
+ )
1331
+
1332
+
1333
+ class Kosmos2PreTrainedModel(PreTrainedModel):
1334
+ """
1335
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1336
+ models.
1337
+ """
1338
+
1339
+ config_class = Kosmos2Config
1340
+ supports_gradient_checkpointing = True
1341
+ _no_split_modules = ["Kosmos2VisionEncoderLayer", "Kosmos2TextBlock"]
1342
+
1343
+ def _init_weights(self, module):
1344
+ """Initialize the weights"""
1345
+ if isinstance(self, Kosmos2VisionModel):
1346
+ factor = self.config.initializer_factor
1347
+ elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)):
1348
+ factor = self.config.vision_config.initializer_factor
1349
+
1350
+ if isinstance(self, (Kosmos2TextModel, Kosmos2TextForCausalLM)):
1351
+ std = self.config.init_std
1352
+ elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)):
1353
+ std = self.config.text_config.init_std
1354
+
1355
+ if isinstance(module, Kosmos2VisionEmbeddings):
1356
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
1357
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
1358
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
1359
+ elif isinstance(module, Kosmos2VisionAttention):
1360
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1361
+ out_proj_std = (module.embed_dim**-0.5) * factor
1362
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
1363
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
1364
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
1365
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
1366
+ if module.q_proj.bias is not None:
1367
+ module.q_proj.bias.data.zero_()
1368
+ if module.k_proj.bias is not None:
1369
+ module.k_proj.bias.data.zero_()
1370
+ if module.v_proj.bias is not None:
1371
+ module.v_proj.bias.data.zero_()
1372
+ if module.out_proj.bias is not None:
1373
+ module.out_proj.bias.data.zero_()
1374
+ elif isinstance(module, Kosmos2VisionMLP):
1375
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1376
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
1377
+ nn.init.normal_(module.fc1.weight, std=fc_std)
1378
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
1379
+ if module.fc1.bias is not None:
1380
+ module.fc1.bias.data.zero_()
1381
+ if module.fc2.bias is not None:
1382
+ module.fc2.bias.data.zero_()
1383
+ elif isinstance(module, Kosmos2VisionEncoderLayer):
1384
+ module.layer_norm1.bias.data.zero_()
1385
+ module.layer_norm1.weight.data.fill_(1.0)
1386
+ module.layer_norm2.bias.data.zero_()
1387
+ module.layer_norm2.weight.data.fill_(1.0)
1388
+ elif isinstance(module, Kosmos2VisionTransformer):
1389
+ module.pre_layrnorm.bias.data.zero_()
1390
+ module.pre_layrnorm.weight.data.fill_(1.0)
1391
+ module.post_layernorm.bias.data.zero_()
1392
+ module.post_layernorm.weight.data.fill_(1.0)
1393
+ elif isinstance(module, KosmosTextAttention):
1394
+ nn.init.normal_(module.q_proj.weight, std=std)
1395
+ nn.init.normal_(module.k_proj.weight, std=std)
1396
+ nn.init.normal_(module.v_proj.weight, std=std)
1397
+ nn.init.normal_(module.out_proj.weight, std=std)
1398
+ if module.q_proj.bias is not None:
1399
+ module.q_proj.bias.data.zero_()
1400
+ if module.k_proj.bias is not None:
1401
+ module.k_proj.bias.data.zero_()
1402
+ if module.v_proj.bias is not None:
1403
+ module.v_proj.bias.data.zero_()
1404
+ if module.out_proj.bias is not None:
1405
+ module.out_proj.bias.data.zero_()
1406
+ elif isinstance(module, Kosmos2TextFFN):
1407
+ nn.init.normal_(module.fc1.weight, std=std)
1408
+ nn.init.normal_(module.fc2.weight, std=std)
1409
+ if module.fc1.bias is not None:
1410
+ module.fc1.bias.data.zero_()
1411
+ if module.fc2.bias is not None:
1412
+ module.fc2.bias.data.zero_()
1413
+ elif isinstance(module, Kosmos2TextForCausalLM):
1414
+ nn.init.normal_(module.lm_head.weight, std=std)
1415
+ if module.lm_head.bias is not None:
1416
+ module.lm_head.bias.data.zero_()
1417
+ elif isinstance(module, Kosmos2ImageToTextProjection):
1418
+ nn.init.normal_(module.dense.weight, std=std)
1419
+ if module.dense.bias is not None:
1420
+ module.dense.bias.data.zero_()
1421
+ elif isinstance(module, Kosmos2TextTransformer):
1422
+ module.embed_tokens.weight.data.normal_(mean=0.0, std=std)
1423
+ if module.embed_tokens.padding_idx is not None:
1424
+ module.embed_tokens.weight.data[module.embed_tokens.padding_idx].zero_()
1425
+
1426
+
1427
+ class Kosmos2VisionModel(Kosmos2PreTrainedModel):
1428
+ config_class = Kosmos2VisionConfig
1429
+ main_input_name = "pixel_values"
1430
+
1431
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model
1432
+ def __init__(self, config: Kosmos2VisionConfig):
1433
+ super().__init__(config)
1434
+ self.model = Kosmos2VisionTransformer(config)
1435
+ # Initialize weights and apply final processing
1436
+ self.post_init()
1437
+
1438
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.get_input_embeddings with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model
1439
+ def get_input_embeddings(self) -> nn.Module:
1440
+ return self.model.embeddings.patch_embedding
1441
+
1442
+ @add_start_docstrings_to_model_forward(KOSMOS2_VISION_INPUTS_DOCSTRING)
1443
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Kosmos2VisionConfig)
1444
+ def forward(
1445
+ self,
1446
+ pixel_values: Optional[torch.FloatTensor] = None,
1447
+ output_attentions: Optional[bool] = None,
1448
+ output_hidden_states: Optional[bool] = None,
1449
+ return_dict: Optional[bool] = None,
1450
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1451
+ r"""
1452
+ Returns:
1453
+
1454
+ """
1455
+ return self.model(
1456
+ pixel_values=pixel_values,
1457
+ output_attentions=output_attentions,
1458
+ output_hidden_states=output_hidden_states,
1459
+ return_dict=return_dict,
1460
+ )
1461
+
1462
+
1463
+ class Kosmos2TextModel(Kosmos2PreTrainedModel):
1464
+ config_class = Kosmos2TextConfig
1465
+
1466
+ def __init__(self, config: Kosmos2TextConfig):
1467
+ super().__init__(config)
1468
+ self.model = Kosmos2TextTransformer(config)
1469
+ # Initialize weights and apply final processing
1470
+ self.post_init()
1471
+
1472
+ def get_input_embeddings(self) -> nn.Module:
1473
+ return self.model.embed_tokens
1474
+
1475
+ def set_input_embeddings(self, value):
1476
+ self.model.embed_tokens = value
1477
+
1478
+ @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING)
1479
+ @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=Kosmos2TextConfig)
1480
+ def forward(
1481
+ self,
1482
+ input_ids: Optional[torch.Tensor] = None,
1483
+ attention_mask: Optional[torch.Tensor] = None,
1484
+ image_embeds: Optional[torch.Tensor] = None,
1485
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1486
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1487
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1488
+ head_mask: Optional[torch.Tensor] = None,
1489
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1490
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1491
+ inputs_embeds: Optional[torch.Tensor] = None,
1492
+ position_ids: Optional[torch.Tensor] = None,
1493
+ use_cache: Optional[bool] = None,
1494
+ output_attentions: Optional[bool] = None,
1495
+ output_hidden_states: Optional[bool] = None,
1496
+ return_dict: Optional[bool] = None,
1497
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
1498
+ r"""
1499
+ Returns:
1500
+
1501
+ """
1502
+ return self.model(
1503
+ input_ids=input_ids,
1504
+ attention_mask=attention_mask,
1505
+ image_embeds=image_embeds,
1506
+ image_embeds_position_mask=image_embeds_position_mask,
1507
+ encoder_hidden_states=encoder_hidden_states,
1508
+ encoder_attention_mask=encoder_attention_mask,
1509
+ head_mask=head_mask,
1510
+ cross_attn_head_mask=cross_attn_head_mask,
1511
+ past_key_values=past_key_values,
1512
+ inputs_embeds=inputs_embeds,
1513
+ position_ids=position_ids,
1514
+ use_cache=use_cache,
1515
+ output_attentions=output_attentions,
1516
+ output_hidden_states=output_hidden_states,
1517
+ return_dict=return_dict,
1518
+ )
1519
+
1520
+
1521
+ @add_start_docstrings(
1522
+ """
1523
+ The text model from KOSMOS-2 with a language modeling head on top (linear layer with weights tied to the input
1524
+ embeddings).
1525
+ """,
1526
+ KOSMOS2_START_DOCSTRING,
1527
+ )
1528
+ class Kosmos2TextForCausalLM(Kosmos2PreTrainedModel):
1529
+ config_class = Kosmos2TextConfig
1530
+ _tied_weights_keys = ["lm_head.weight"]
1531
+
1532
+ def __init__(self, config: Kosmos2TextConfig):
1533
+ super().__init__(config)
1534
+
1535
+ self.model = Kosmos2TextTransformer(config)
1536
+ self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False)
1537
+
1538
+ # Initialize weights and apply final processing
1539
+ self.post_init()
1540
+
1541
+ def get_input_embeddings(self) -> nn.Module:
1542
+ return self.model.embed_tokens
1543
+
1544
+ def set_input_embeddings(self, value):
1545
+ self.model.embed_tokens = value
1546
+
1547
+ def get_output_embeddings(self) -> nn.Module:
1548
+ return self.lm_head
1549
+
1550
+ def set_output_embeddings(self, new_embeddings):
1551
+ self.lm_head = new_embeddings
1552
+
1553
+ @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING)
1554
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=Kosmos2TextConfig)
1555
+ def forward(
1556
+ self,
1557
+ input_ids: Optional[torch.Tensor] = None,
1558
+ attention_mask: Optional[torch.Tensor] = None,
1559
+ image_embeds: Optional[torch.Tensor] = None,
1560
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1561
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1562
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1563
+ head_mask: Optional[torch.Tensor] = None,
1564
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1565
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1566
+ inputs_embeds: Optional[torch.Tensor] = None,
1567
+ position_ids: Optional[torch.Tensor] = None,
1568
+ labels: Optional[torch.LongTensor] = None,
1569
+ use_cache: Optional[bool] = None,
1570
+ output_attentions: Optional[bool] = None,
1571
+ output_hidden_states: Optional[bool] = None,
1572
+ return_dict: Optional[bool] = None,
1573
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1574
+ r"""
1575
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1576
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1577
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1578
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1579
+
1580
+ Returns:
1581
+
1582
+ """
1583
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1584
+
1585
+ if labels is not None:
1586
+ if use_cache:
1587
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1588
+ use_cache = False
1589
+
1590
+ outputs = self.model(
1591
+ input_ids=input_ids,
1592
+ attention_mask=attention_mask,
1593
+ image_embeds=image_embeds,
1594
+ image_embeds_position_mask=image_embeds_position_mask,
1595
+ encoder_hidden_states=encoder_hidden_states,
1596
+ encoder_attention_mask=encoder_attention_mask,
1597
+ head_mask=head_mask,
1598
+ cross_attn_head_mask=cross_attn_head_mask,
1599
+ past_key_values=past_key_values,
1600
+ inputs_embeds=inputs_embeds,
1601
+ position_ids=position_ids,
1602
+ use_cache=use_cache,
1603
+ output_attentions=output_attentions,
1604
+ output_hidden_states=output_hidden_states,
1605
+ return_dict=return_dict,
1606
+ )
1607
+ lm_logits = self.lm_head(outputs[0])
1608
+
1609
+ loss = None
1610
+ if labels is not None:
1611
+ # move labels to correct device to enable model parallelism
1612
+ labels = labels.to(lm_logits.device)
1613
+ # Shift so that tokens < n predict n
1614
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1615
+ shift_labels = labels[..., 1:].contiguous()
1616
+ batch_size, seq_length, vocab_size = shift_logits.shape
1617
+ # Flatten the tokens
1618
+ loss_fct = CrossEntropyLoss()
1619
+ loss = loss_fct(
1620
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
1621
+ )
1622
+
1623
+ if not return_dict:
1624
+ output = (lm_logits,) + outputs[1:]
1625
+ return (loss,) + output if loss is not None else output
1626
+
1627
+ return CausalLMOutputWithCrossAttentions(
1628
+ loss=loss,
1629
+ logits=lm_logits,
1630
+ past_key_values=outputs.past_key_values,
1631
+ hidden_states=outputs.hidden_states,
1632
+ attentions=outputs.attentions,
1633
+ cross_attentions=outputs.cross_attentions,
1634
+ )
1635
+
1636
+ def prepare_inputs_for_generation(
1637
+ self,
1638
+ input_ids,
1639
+ image_embeds=None,
1640
+ image_embeds_position_mask=None,
1641
+ past_key_values=None,
1642
+ attention_mask=None,
1643
+ use_cache=None,
1644
+ **model_kwargs,
1645
+ ):
1646
+ input_shape = input_ids.shape
1647
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1648
+ if attention_mask is None:
1649
+ attention_mask = input_ids.new_ones(input_shape)
1650
+
1651
+ position_ids = None
1652
+
1653
+ # cut input_ids if past_key_values is used
1654
+ if past_key_values is not None:
1655
+ position_ids = create_position_ids_from_input_ids(
1656
+ input_ids,
1657
+ padding_idx=self.config.pad_token_id,
1658
+ past_key_values_length=0,
1659
+ )[:, -1:]
1660
+
1661
+ input_ids = input_ids[:, -1:]
1662
+ # the image info is already encoded into the past keys/values
1663
+ image_embeds = None
1664
+ image_embeds_position_mask = None
1665
+ elif image_embeds_position_mask is not None:
1666
+ # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation)
1667
+ batch_size, seq_len = input_ids.size()
1668
+ mask_len = image_embeds_position_mask.size()[-1]
1669
+ image_embeds_position_mask = torch.cat(
1670
+ (
1671
+ image_embeds_position_mask,
1672
+ torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device),
1673
+ ),
1674
+ dim=1,
1675
+ )
1676
+
1677
+ return {
1678
+ "input_ids": input_ids,
1679
+ "image_embeds": image_embeds,
1680
+ "image_embeds_position_mask": image_embeds_position_mask,
1681
+ "past_key_values": past_key_values,
1682
+ "attention_mask": attention_mask,
1683
+ "position_ids": position_ids,
1684
+ "use_cache": use_cache,
1685
+ }
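+
+ # Decoding sketch (illustrative): on the first call `past_key_values` is None, so the full
+ # prompt, `image_embeds` and the (possibly extended) `image_embeds_position_mask` are returned;
+ # on later calls only the last token is kept and both image inputs are set to None, since the
+ # image information already lives in `past_key_values`.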
1686
+
1687
+ @staticmethod
1688
+ # Copied from transformers.models.umt5.modeling_umt5.UMT5ForConditionalGeneration._reorder_cache
1689
+ def _reorder_cache(past_key_values, beam_idx):
1690
+ reordered_past = ()
1691
+ for layer_past in past_key_values:
1692
+ reordered_past += (
1693
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1694
+ )
1695
+ return reordered_past
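+
+ # e.g. during beam search, `beam_idx` tells, for each beam, which previous beam it extends;
+ # `index_select(0, beam_idx)` reorders the batch dimension of every cached key/value state.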
1696
+
1697
+
1698
+ class Kosmos2ImageToTextProjection(nn.Module):
1699
+ """The layer that transforms the image model's output to part of the text model's input (namely, image features)"""
1700
+
1701
+ def __init__(self, config: Kosmos2Config):
1702
+ super().__init__()
1703
+ self.dense = nn.Linear(config.vision_config.hidden_size, config.text_config.embed_dim)
1704
+ self.latent_query = nn.Parameter(torch.randn(config.latent_query_num, config.text_config.embed_dim))
1705
+
1706
+ self.x_attn = KosmosTextAttention(
1707
+ config.text_config,
1708
+ config.text_config.embed_dim,
1709
+ config.text_config.attention_heads,
1710
+ dropout=config.text_config.attention_dropout,
1711
+ is_decoder=False,
1712
+ add_inner_attn_layernorm=False,
1713
+ )
1714
+
1715
+ def forward(self, features):
1716
+ hidden_states = self.dense(features)
1717
+
1718
+ # shape = [batch, latent_query_num, h_dim]
1719
+ latent_query = self.latent_query.unsqueeze(0).expand(hidden_states.size(0), -1, -1)
1720
+ key_value_states = torch.cat([hidden_states, latent_query], dim=1)
1721
+
1722
+ hidden_states, attn_weights, _ = self.x_attn(
1723
+ hidden_states=latent_query,
1724
+ encoder_hidden_states=key_value_states,
1725
+ past_key_value=None,
1726
+ attention_mask=None,
1727
+ output_attentions=None,
1728
+ )
1729
+
1730
+ return hidden_states, attn_weights
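+
+ # Shape sketch (illustrative, assuming the released patch14-224 checkpoint: batch_size=2,
+ # 257 vision tokens, latent_query_num=64, embed_dim=2048):
+ # features: [2, 257, vision_hidden] --dense--> hidden_states: [2, 257, 2048]
+ # latent_query: [64, 2048] --expand--> [2, 64, 2048]
+ # key_value_states = cat(...): [2, 257 + 64, 2048]
+ # x_attn(latent_query, key_value_states) -> hidden_states: [2, 64, 2048]
+ # i.e. the projection always yields `latent_query_num` image features per example.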
1731
+
1732
+
1733
+ @add_start_docstrings(
1734
+ """
1735
+ KOSMOS-2 Model for generating text and image features. The model consists of a vision encoder and a language model.
1736
+ """,
1737
+ KOSMOS2_START_DOCSTRING,
1738
+ )
1739
+ class Kosmos2Model(Kosmos2PreTrainedModel):
1740
+ config_class = Kosmos2Config
1741
+ main_input_name = "pixel_values"
1742
+
1743
+ def __init__(self, config: Kosmos2Config):
1744
+ super().__init__(config)
1745
+
1746
+ self.text_model = Kosmos2TextModel(config.text_config)
1747
+ self.vision_model = Kosmos2VisionModel(config.vision_config)
1748
+ self.image_to_text_projection = Kosmos2ImageToTextProjection(config)
1749
+
1750
+ # Initialize weights and apply final processing
1751
+ self.post_init()
1752
+
1753
+ def get_input_embeddings(self) -> nn.Module:
1754
+ return self.text_model.model.embed_tokens
1755
+
1756
+ def set_input_embeddings(self, value):
1757
+ self.text_model.model.embed_tokens = value
1758
+
1759
+ @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING)
1760
+ @replace_return_docstrings(output_type=Kosmos2ModelOutput, config_class=_CONFIG_FOR_DOC)
1761
+ def forward(
1762
+ self,
1763
+ pixel_values: Optional[torch.Tensor] = None,
1764
+ input_ids: Optional[torch.Tensor] = None,
1765
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1766
+ attention_mask: Optional[torch.Tensor] = None,
1767
+ head_mask: Optional[torch.Tensor] = None,
1768
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1769
+ image_embeds: Optional[torch.Tensor] = None,
1770
+ inputs_embeds: Optional[torch.Tensor] = None,
1771
+ position_ids: Optional[torch.Tensor] = None,
1772
+ use_cache: Optional[bool] = None,
1773
+ output_attentions: Optional[bool] = None,
1774
+ output_hidden_states: Optional[bool] = None,
1775
+ return_dict: Optional[bool] = None,
1776
+ ) -> Union[Tuple, Kosmos2ModelOutput]:
1777
+ r"""
1778
+ Returns:
1779
+
1780
+ Examples:
1781
+
1782
+ ```python
1783
+ >>> from PIL import Image
1784
+ >>> import requests
1785
+ >>> from transformers import AutoProcessor, Kosmos2Model
1786
+
1787
+ >>> model = Kosmos2Model.from_pretrained("microsoft/kosmos-2-patch14-224")
1788
+ >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
1789
+
1790
+ >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
1791
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1792
+
1793
+ >>> text = (
1794
+ ... "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863>"
1795
+ ... "</object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911>"
1796
+ ... "</object>"
1797
+ ... )
1798
+
1799
+ >>> inputs = processor(text=text, images=image, return_tensors="pt", add_eos_token=True)
1800
+
1801
+ >>> last_hidden_state = model(
1802
+ ... pixel_values=inputs["pixel_values"],
1803
+ ... input_ids=inputs["input_ids"],
1804
+ ... attention_mask=inputs["attention_mask"],
1805
+ ... image_embeds_position_mask=inputs["image_embeds_position_mask"],
1806
+ ... ).last_hidden_state
1807
+ >>> list(last_hidden_state.shape)
1808
+ [1, 91, 2048]
1809
+ ```"""
1810
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1811
+ output_hidden_states = (
1812
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1813
+ )
1814
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1815
+
1816
+ vision_model_output = None
1817
+ projection_attentions = None
1818
+ if image_embeds is None:
1819
+ if pixel_values is None:
1820
+ raise ValueError("You have to specify either `pixel_values` or `image_embeds`.")
1821
+
1822
+ vision_model_output = self.vision_model(
1823
+ pixel_values=pixel_values,
1824
+ output_attentions=output_attentions,
1825
+ output_hidden_states=output_hidden_states,
1826
+ return_dict=return_dict,
1827
+ )
1828
+ # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`.
1829
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])
1830
+ # normalized features
1831
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
1832
+ image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
1833
+
1834
+ outputs = self.text_model(
1835
+ input_ids=input_ids,
1836
+ attention_mask=attention_mask,
1837
+ image_embeds=image_embeds,
1838
+ image_embeds_position_mask=image_embeds_position_mask,
1839
+ head_mask=head_mask,
1840
+ past_key_values=past_key_values,
1841
+ inputs_embeds=inputs_embeds,
1842
+ position_ids=position_ids,
1843
+ use_cache=use_cache,
1844
+ output_attentions=output_attentions,
1845
+ output_hidden_states=output_hidden_states,
1846
+ return_dict=return_dict,
1847
+ )
1848
+
1849
+ if not return_dict:
1850
+ outputs = outputs + (image_embeds, projection_attentions, vision_model_output)
1851
+ return tuple(output for output in outputs if output is not None)
1852
+
1853
+ return Kosmos2ModelOutput(
1854
+ last_hidden_state=outputs.last_hidden_state,
1855
+ past_key_values=outputs.past_key_values,
1856
+ hidden_states=outputs.hidden_states,
1857
+ attentions=outputs.attentions,
1858
+ image_embeds=image_embeds,
1859
+ projection_attentions=projection_attentions,
1860
+ vision_model_output=vision_model_output,
1861
+ )
1862
+
1863
+
1864
+ @add_start_docstrings(
1865
+ """
1866
+ KOSMOS-2 Model for generating text and bounding boxes given an image. The model consists of a vision encoder and a
1867
+ language model.
1868
+ """,
1869
+ KOSMOS2_START_DOCSTRING,
1870
+ )
1871
+ class Kosmos2ForConditionalGeneration(Kosmos2PreTrainedModel):
1872
+ config_class = Kosmos2Config
1873
+ main_input_name = "pixel_values"
1874
+ _tied_weights_keys = ["text_model.lm_head.weight"]
1875
+
1876
+ def __init__(self, config: Kosmos2Config):
1877
+ super().__init__(config)
1878
+
1879
+ self.text_model = Kosmos2TextForCausalLM(config.text_config)
1880
+ self.vision_model = Kosmos2VisionModel(config.vision_config)
1881
+
1882
+ self.image_to_text_projection = Kosmos2ImageToTextProjection(config)
1883
+
1884
+ # Initialize weights and apply final processing
1885
+ self.post_init()
1886
+
1887
+ def get_input_embeddings(self) -> nn.Module:
1888
+ return self.text_model.model.embed_tokens
1889
+
1890
+ def set_input_embeddings(self, value):
1891
+ self.text_model.model.embed_tokens = value
1892
+
1893
+ def get_output_embeddings(self) -> nn.Module:
1894
+ return self.text_model.get_output_embeddings()
1895
+
1896
+ def set_output_embeddings(self, new_embeddings):
1897
+ self.text_model.set_output_embeddings(new_embeddings)
1898
+
1899
+ @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING)
1900
+ @replace_return_docstrings(output_type=Kosmos2ForConditionalGenerationModelOutput, config_class=_CONFIG_FOR_DOC)
1901
+ def forward(
1902
+ self,
1903
+ pixel_values: Optional[torch.Tensor] = None,
1904
+ input_ids: Optional[torch.Tensor] = None,
1905
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1906
+ attention_mask: Optional[torch.Tensor] = None,
1907
+ head_mask: Optional[torch.Tensor] = None,
1908
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1909
+ image_embeds: Optional[torch.Tensor] = None,
1910
+ inputs_embeds: Optional[torch.Tensor] = None,
1911
+ position_ids: Optional[torch.Tensor] = None,
1912
+ labels: Optional[torch.LongTensor] = None,
1913
+ use_cache: Optional[bool] = None,
1914
+ output_attentions: Optional[bool] = None,
1915
+ output_hidden_states: Optional[bool] = None,
1916
+ return_dict: Optional[bool] = None,
1917
+ ) -> Union[Tuple, Kosmos2ForConditionalGenerationModelOutput]:
1918
+ r"""
1919
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1920
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1921
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1922
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1923
+
1924
+ Returns:
1925
+
1926
+ Examples:
1927
+
1928
+ ```python
1929
+ >>> from PIL import Image
1930
+ >>> import requests
1931
+ >>> from transformers import AutoProcessor, Kosmos2ForConditionalGeneration
1932
+
1933
+ >>> model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224")
1934
+ >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
1935
+
1936
+ >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
1937
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1938
+
1939
+ >>> prompt = "<grounding> An image of"
1940
+
1941
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
1942
+
1943
+ >>> generated_ids = model.generate(
1944
+ ... pixel_values=inputs["pixel_values"],
1945
+ ... input_ids=inputs["input_ids"],
1946
+ ... attention_mask=inputs["attention_mask"],
1947
+ ... image_embeds=None,
1948
+ ... image_embeds_position_mask=inputs["image_embeds_position_mask"],
1949
+ ... use_cache=True,
1950
+ ... max_new_tokens=64,
1951
+ ... )
1952
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1953
+ >>> processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
1954
+ >>> processed_text
1955
+ '<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>.'
1956
+
1957
+ >>> caption, entities = processor.post_process_generation(generated_text)
1958
+ >>> caption
1959
+ 'An image of a snowman warming himself by a fire.'
1960
+
1961
+ >>> entities
1962
+ [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])]
1963
+ ```"""
1964
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1965
+ output_hidden_states = (
1966
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1967
+ )
1968
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1969
+
1970
+ vision_model_output = None
1971
+ projection_attentions = None
1972
+ if image_embeds is None:
1973
+ if pixel_values is None:
1974
+ raise ValueError("You have to specify either `pixel_values` or `image_embeds`.")
1975
+
1976
+ vision_model_output = self.vision_model(
1977
+ pixel_values=pixel_values,
1978
+ output_attentions=output_attentions,
1979
+ output_hidden_states=output_hidden_states,
1980
+ return_dict=return_dict,
1981
+ )
1982
+ # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`.
1983
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])
1984
+ # normalized features
1985
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
1986
+ image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
1987
+
1988
+ lm_outputs = self.text_model(
1989
+ input_ids=input_ids,
1990
+ attention_mask=attention_mask,
1991
+ image_embeds=image_embeds,
1992
+ image_embeds_position_mask=image_embeds_position_mask,
1993
+ head_mask=head_mask,
1994
+ past_key_values=past_key_values,
1995
+ inputs_embeds=inputs_embeds,
1996
+ position_ids=position_ids,
1997
+ labels=labels,
1998
+ use_cache=use_cache,
1999
+ output_attentions=output_attentions,
2000
+ output_hidden_states=output_hidden_states,
2001
+ return_dict=return_dict,
2002
+ )
2003
+
2004
+ if not return_dict:
2005
+ outputs = lm_outputs + (image_embeds, projection_attentions, vision_model_output)
2006
+ return tuple(output for output in outputs if output is not None)
2007
+
2008
+ return Kosmos2ForConditionalGenerationModelOutput(
2009
+ loss=lm_outputs.loss,
2010
+ logits=lm_outputs.logits,
2011
+ past_key_values=lm_outputs.past_key_values,
2012
+ hidden_states=lm_outputs.hidden_states,
2013
+ attentions=lm_outputs.attentions,
2014
+ image_embeds=image_embeds,
2015
+ projection_attentions=projection_attentions,
2016
+ vision_model_output=vision_model_output,
2017
+ )
2018
+
2019
+ def generate(
2020
+ self,
2021
+ pixel_values: Optional[torch.Tensor] = None,
2022
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
2023
+ input_ids: Optional[torch.Tensor] = None,
2024
+ attention_mask: Optional[torch.Tensor] = None,
2025
+ image_embeds: Optional[torch.Tensor] = None,
2026
+ **kwargs,
2027
+ ):
2028
+ # in order to allow `inputs` argument (as in `GenerationMixin`)
2029
+ inputs = kwargs.pop("inputs", None)
2030
+ if pixel_values is not None and inputs is not None:
2031
+ raise ValueError(
2032
+ f"`inputs`: {inputs} were passed alongside `pixel_values` which is not allowed. "
2033
+ f"Make sure to either pass `inputs` or pixel_values=..."
2034
+ )
2035
+ if pixel_values is None and inputs is not None:
2036
+ pixel_values = inputs
2037
+
2038
+ if image_embeds is None:
2039
+ vision_model_output = self.vision_model(pixel_values)
2040
+ # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`.
2041
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])
2042
+ # normalized features
2043
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
2044
+ image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
2045
+
2046
+ output = self.text_model.generate(
2047
+ input_ids=input_ids,
2048
+ attention_mask=attention_mask,
2049
+ image_embeds=image_embeds,
2050
+ image_embeds_position_mask=image_embeds_position_mask,
2051
+ **kwargs,
2052
+ )
2053
+
2054
+ return output
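+
+ # Usage sketch (illustrative): `image_embeds` can be precomputed once and reused across
+ # several `generate` calls on the same image, mirroring the computation above:
+ # vision_out = model.vision_model(pixel_values)
+ # image_embeds = model.vision_model.model.post_layernorm(vision_out[0])
+ # image_embeds = nn.functional.normalize(image_embeds, dim=-1)
+ # image_embeds, _ = model.image_to_text_projection(image_embeds)
+ # model.generate(input_ids=input_ids, attention_mask=attention_mask,
+ # image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask)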
venv/lib/python3.10/site-packages/transformers/models/kosmos2/processing_kosmos2.py ADDED
@@ -0,0 +1,666 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Processor class for KOSMOS-2."""
16
+
17
+ import copy
18
+ import math
19
+ import re
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ from ...image_processing_utils import BatchFeature
23
+ from ...image_utils import ImageInput, is_batched
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils import AddedToken
26
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
27
+ from ...utils import TensorType
28
+
29
+
30
+ BboxInput = Union[
31
+ List[Tuple[int, int]],
32
+ List[Tuple[float, float, float, float]],
33
+ List[List[Tuple[int, int]]],
34
+ List[List[Tuple[float, float, float, float]]],
35
+ ]
36
+
37
+
38
+ class Kosmos2Processor(ProcessorMixin):
39
+ r"""
40
+ Constructs a KOSMOS-2 processor which wraps a KOSMOS-2 image processor and a KOSMOS-2 tokenizer into a single
41
+ processor.
42
+
43
+ [`Kosmos2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and some functionalities of
44
+ [`XLMRobertaTokenizerFast`]. See the docstring of [`~Kosmos2Processor.__call__`] and [`~Kosmos2Processor.decode`]
45
+ for more information.
46
+
47
+ Args:
48
+ image_processor (`CLIPImageProcessor`):
49
+ An instance of [`CLIPImageProcessor`]. The image processor is a required input.
50
+ tokenizer (`XLMRobertaTokenizerFast`):
51
+ An instance of [`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
52
+ num_patch_index_tokens (`int`, *optional*, defaults to 1024):
53
+ The number of tokens that represent patch indices.
54
+ """
55
+
56
+ attributes = ["image_processor", "tokenizer"]
57
+ image_processor_class = "CLIPImageProcessor"
58
+ tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
59
+
60
+ def __init__(self, image_processor, tokenizer, num_patch_index_tokens=1024):
61
+ tokenizer.return_token_type_ids = False
62
+
63
+ self.eod_token = "</doc>"
64
+
65
+ self.boi_token = "<image>"
66
+ self.eoi_token = "</image>"
67
+
68
+ self.eoc_token = "</chunk>"
69
+ self.eol_token = "</line>"
70
+
71
+ self.bop_token = "<phrase>"
72
+ self.eop_token = "</phrase>"
73
+
74
+ self.boo_token = "<object>"
75
+ self.eoo_token = "</object>"
76
+
77
+ self.dom_token = "</delimiter_of_multi_objects/>"
78
+
79
+ self.grd_token = "<grounding>"
80
+
81
+ self.tag_tokens = [
82
+ self.eod_token,
83
+ self.boi_token,
84
+ self.eoi_token,
85
+ self.eoc_token,
86
+ self.eol_token,
87
+ self.bop_token,
88
+ self.eop_token,
89
+ self.boo_token,
90
+ self.eoo_token,
91
+ self.dom_token,
92
+ self.grd_token,
93
+ ]
94
+
95
+ self.num_patch_index_tokens = num_patch_index_tokens
96
+ patch_index_tokens = [f"<patch_index_{str(x).zfill(4)}>" for x in range(self.num_patch_index_tokens)]
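+ # e.g. "<patch_index_0000>" through "<patch_index_1023>" for the default 1024 tokens (a 32x32 grid)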
97
+
98
+ tokens_to_add = []
99
+ for token in self.tag_tokens + patch_index_tokens:
100
+ tokens_to_add.append(AddedToken(token, lstrip=True, rstrip=False, normalized=False))
101
+ tokenizer.add_tokens(tokens_to_add)
102
+
103
+ super().__init__(image_processor, tokenizer)
104
+
105
+ def __call__(
106
+ self,
107
+ images: ImageInput = None,
108
+ text: Union[TextInput, List[TextInput]] = None,
109
+ bboxes: BboxInput = None,
110
+ num_image_tokens: Optional[int] = 64,
111
+ first_image_token_id: Optional[int] = None,
112
+ add_special_tokens: bool = True,
113
+ add_eos_token: bool = False,
114
+ padding: Union[bool, str, PaddingStrategy] = False,
115
+ truncation: Union[bool, str, TruncationStrategy] = None,
116
+ max_length: Optional[int] = None,
117
+ pad_to_multiple_of: Optional[int] = None,
118
+ return_attention_mask: Optional[bool] = None,
119
+ return_length: bool = False,
120
+ verbose: bool = True,
121
+ return_tensors: Optional[Union[str, TensorType]] = None,
122
+ **kwargs,
123
+ ) -> BatchFeature:
124
+ """
125
+ This method uses [`CLIPImageProcessor.__call__`] to prepare image(s) for the model, and
126
+ [`XLMRobertaTokenizerFast.__call__`] to prepare text for the model.
127
+
128
+ Please refer to the docstring of the above two methods for more information.
129
+
130
+ The rest of this documentation shows the arguments specific to `Kosmos2Processor`.
131
+
132
+ Args:
133
+ bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*):
134
+ The bounding boxes associated with `texts`.
135
+ num_image_tokens (`int`, defaults to 64):
136
+ The number of (consecutive) placeholder positions used to store image information.
137
+ This should be the same as `latent_query_num` in the instance of `Kosmos2Config` you are using.
138
+ first_image_token_id (`int`, *optional*):
139
+ The token id that will be used for the first place of the subsequence that is reserved to store image
140
+ information. If unset, will default to `self.tokenizer.unk_token_id + 1`.
141
+ add_eos_token (`bool`, defaults to `False`):
142
+ Whether or not to include `EOS` token id in the encoding when `add_special_tokens=True`.
143
+ """
144
+ if images is None and text is None:
145
+ raise ValueError("You have to specify either images or text.")
146
+
147
+ encoding = BatchFeature()
148
+
149
+ if images is not None:
150
+ image_encoding = self.image_processor(images, return_tensors=return_tensors)
151
+ encoding.update(image_encoding)
152
+
153
+ if text is not None:
154
+ text = self.preprocess_examples(text, images, bboxes, num_image_tokens=num_image_tokens)
155
+
156
+ if add_special_tokens and not add_eos_token:
157
+ if isinstance(text, str):
158
+ text = f"{self.tokenizer.bos_token}{text}"
159
+ elif isinstance(text, list):
160
+ text = [f"{self.tokenizer.bos_token}{s}" for s in text]
161
+
162
+ text_encoding = self.tokenizer(
163
+ text=text,
164
+ add_special_tokens=(add_special_tokens and add_eos_token),
165
+ padding=padding and images is None,
166
+ truncation=truncation,
167
+ max_length=max_length,
168
+ pad_to_multiple_of=pad_to_multiple_of,
169
+ return_attention_mask=return_attention_mask,
170
+ verbose=verbose,
171
+ return_tensors=return_tensors if images is None else None,
172
+ **kwargs,
173
+ )
174
+ encoding.update(text_encoding)
175
+
176
+ if text is not None and images is not None:
177
+ # Use the id of the first token after <unk>
178
+ if first_image_token_id is None:
179
+ first_image_token_id = self.tokenizer.unk_token_id + 1
180
+
181
+ # To see if we need one more `0` (for `<s>`) at the beginning of `image_embeds_position_mask`.
182
+ with_bos = add_special_tokens
183
+
184
+ # The first (actual) `<image>` token is always at the 1st or 2nd place (after `<s>` if any). Here we look
185
+ # for the second `<image>` token (which indicates the first image token).
186
+ start_index = int(with_bos) + 1
187
+
188
+ # Add `image_embeds_position_mask`: the leading and trailing `0` are for `boi` and `eoi` tokens. The `1` indicates
189
+ # the places of image tokens.
190
+ image_token_ids = list(range(first_image_token_id, first_image_token_id + num_image_tokens))
191
+ base_image_embeds_position_mask = [0] + [1] * num_image_tokens + [0]
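+ # e.g. with num_image_tokens=3 the base mask is [0, 1, 1, 1, 0]; a leading 0 is prepended for
+ # `<s>` when `with_bos` is True, and trailing 0s cover the text part (illustrative values, the
+ # default uses num_image_tokens=64)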
192
+
193
+ # loop over `encoding["input_ids"]`
194
+ input_ids = []
195
+ image_embeds_position_mask = []
196
+ all_input_ids = encoding["input_ids"]
197
+ # not batched -> (changed to) batch of size 1
198
+ if isinstance(text, str):
199
+ all_input_ids = [all_input_ids]
200
+ encoding["attention_mask"] = [encoding["attention_mask"]]
201
+ for text_ids in all_input_ids:
202
+ # change the ids for the fake `<image>` tokens in `input_ids`
203
+ text_ids = text_ids[:start_index] + image_token_ids + text_ids[start_index + num_image_tokens :]
204
+ input_ids.append(text_ids)
205
+
206
+ mask = copy.copy(base_image_embeds_position_mask)
207
+ if with_bos:
208
+ # for `<s>`
209
+ mask = [0] + mask
210
+ # trailing part (which are not related to the image)
211
+ mask += [0] * (len(text_ids) - len(mask))
212
+ image_embeds_position_mask.append(mask)
213
+
214
+ if isinstance(text, list):
215
+ sorted_length = sorted(
216
+ [(idx, len(x)) for idx, x in enumerate(text_encoding.input_ids)], key=lambda x: x[-1]
217
+ )
218
+ _, min_len_not_padded = sorted_length[0]
219
+ idx, _ = sorted_length[-1]
220
+
221
+ text_encoding = self.tokenizer(
222
+ text=[text[idx]],
223
+ add_special_tokens=(add_special_tokens and add_eos_token),
224
+ padding=padding,
225
+ truncation=truncation,
226
+ max_length=max_length,
227
+ pad_to_multiple_of=pad_to_multiple_of,
228
+ verbose=verbose,
229
+ return_tensors=None,
230
+ **kwargs,
231
+ )
232
+ max_len_padded = len(text_encoding.input_ids[0])
233
+
234
+ if min_len_not_padded != max_len_padded:
235
+ if self.tokenizer.padding_side == "right":
236
+ input_ids = [x + [self.tokenizer.pad_token_id] * (max_len_padded - len(x)) for x in input_ids]
237
+ image_embeds_position_mask = [
238
+ x + [0] * (max_len_padded - len(x)) for x in image_embeds_position_mask
239
+ ]
240
+ encoding["attention_mask"] = [
241
+ x + [0] * (max_len_padded - len(x)) for x in encoding["attention_mask"]
242
+ ]
243
+ elif self.tokenizer.padding_side == "left":
244
+ input_ids = [[self.tokenizer.pad_token_id] * (max_len_padded - len(x)) + x for x in input_ids]
245
+ image_embeds_position_mask = [
246
+ [0] * (max_len_padded - len(x)) + x for x in image_embeds_position_mask
247
+ ]
248
+ encoding["attention_mask"] = [
249
+ [0] * (max_len_padded - len(x)) + x for x in encoding["attention_mask"]
250
+ ]
251
+
252
+ # un-batch if necessary
253
+ if isinstance(text, str) and return_tensors is None:
254
+ input_ids = input_ids[0]
255
+ encoding["attention_mask"] = encoding["attention_mask"][0]
256
+ image_embeds_position_mask = image_embeds_position_mask[0]
257
+
258
+ # update (with the target tensor type if specified)
259
+ encoding.update(
260
+ BatchEncoding(
261
+ data={
262
+ "input_ids": input_ids,
263
+ "attention_mask": encoding["attention_mask"],
264
+ "image_embeds_position_mask": image_embeds_position_mask,
265
+ },
266
+ tensor_type=return_tensors,
267
+ )
268
+ )
269
+
270
+ return encoding
271
+
272
+ def _check_bboxes_for_single_text(self, bboxes):
273
+ """
274
+ Check `bboxes` for a single text example. It could be:
275
+ - `None`: no bounding box associated with a text.
276
+ - A list with each element being the bounding boxes associated with one `<phrase> ... </phrase>` pair found
277
+ in a text. Each element could be:
278
+ - `None`: no bounding box associated with a `<phrase> ... </phrase>` pair.
279
+ - A tuple of 2 integers: a single bounding box specified by patch indices.
280
+ - A tuple of 4 floating point numbers: a single bounding box specified by (normalized) coordinates.
281
+ - A list containing the above 2 tuple types: Multiple bounding boxes for a
282
+ `<phrase> ... </phrase>` pair.
283
+ """
284
+ if bboxes is None:
285
+ return
286
+ elif not isinstance(bboxes, list):
287
+ raise ValueError("`bboxes` (for a single text example) should be `None` or a list.")
288
+
289
+ # `bbox` is the bounding boxes for a single <phrase> </phrase> pair
290
+ for bbox in bboxes:
291
+ if bbox is None:
292
+ continue
293
+ elif not isinstance(bbox, list):
294
+ bbox = [bbox]
295
+ for element in bbox:
296
+ if not isinstance(element, tuple) or not (
297
+ (len(element) == 2 and all(isinstance(x, int) for x in element))
298
+ or (len(element) == 4 and all(isinstance(x, float) for x in element))
299
+ ):
300
+ raise ValueError(
301
+ "Each element in `bboxes` (for a single text example) should be either `None`, a tuple containing "
302
+ "2 integers or 4 floating point numbers, or a list containing such tuples. Also "
303
+ "make sure the arguments `texts` and `bboxes` passed to `preprocess_examples` are both in "
304
+ "batches or both for a single example."
305
+ )
306
+
307
+ def _preprocess_single_example(self, text, image, bboxes, img_info_tokens):
308
+ text = text.strip()
309
+ if image is not None:
310
+ # Add `<image> ... (fake) image tokens ... </image>`
311
+ text = f"{img_info_tokens} {text}"
312
+
313
+ # Add `<object> <patch_index_xxxx> <patch_index_yyyy> </object>` after `<phrase> phrase text </phrase>`
314
+ text = self._insert_patch_index_tokens(text, bboxes)
315
+ return text
316
+
317
+ def preprocess_examples(
318
+ self,
319
+ texts: Union[TextInput, List[TextInput]],
320
+ images: ImageInput = None,
321
+ bboxes: BboxInput = None,
322
+ num_image_tokens: Optional[int] = 64,
323
+ ) -> Union[str, List[str]]:
324
+ """Add image and bounding box information to `texts` as image and patch index tokens.
325
+
326
+ Args:
327
+ texts (`Union[TextInput, List[TextInput]]`): The texts to be processed.
328
+ images (`ImageInput`, *optional*): The images associated to `texts`.
329
+ bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*):
330
+ The bounding boxes associated with `texts`.
331
+ num_image_tokens (`int`, *optional*, defaults to 64):
332
+ The number of image tokens (used as latent queries). This should correspond to the `latent_query_num`
333
+ attribute in `Kosmos2Config`.
334
+
335
+ Returns:
336
+ `Union[TextInput, List[TextInput]]`: The processed texts with image and patch index tokens.
337
+ """
338
+ # These are fake `<image>` tokens enclosed between (the actual) `<image>` token and `</image>`.
339
+ img_tokens = [self.boi_token] * num_image_tokens
340
+ img_info_tokens = " ".join([self.boi_token] + img_tokens + [self.eoi_token])
341
+
342
+ # make batch to simplify processing logic
343
+ batched = True
344
+ if isinstance(texts, str):
345
+ batched = False
346
+ texts = [texts]
347
+
348
+ if images is None:
349
+ images = [None] * len(texts)
350
+ elif not is_batched(images):
351
+ images = [images]
352
+ if len(texts) != len(images):
353
+ raise ValueError(
354
+ f"The number of examples in `texts` and `images` should be the same. Got {len(texts)} vs. {len(images)} instead."
355
+ )
356
+
357
+ if not batched:
358
+ self._check_bboxes_for_single_text(bboxes)
359
+ bboxes = [bboxes]
360
+ elif bboxes is not None:
361
+ if not isinstance(bboxes, list):
362
+ raise ValueError("`bboxes` should be `None` or a list (as a batch) when `texts` is passed as a batch.")
363
+ for x in bboxes:
364
+ self._check_bboxes_for_single_text(x)
365
+ else:
366
+ bboxes = [None] * len(texts)
367
+
368
+ if len(bboxes) != len(texts):
369
+ raise ValueError(
370
+ f"The number of examples in `texts` and `bboxes` should be the same. Got {len(texts)} vs. {len(bboxes)} instead."
371
+ )
372
+
373
+ result = [
374
+ self._preprocess_single_example(text, image, bbox, img_info_tokens)
375
+ for text, image, bbox in zip(texts, images, bboxes)
376
+ ]
377
+ # un-batch if necessary
378
+ if not batched:
379
+ result = result[0]
380
+
381
+ return result
382
+
383
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
384
+ def batch_decode(self, *args, **kwargs):
385
+ """
386
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
387
+ refer to the docstring of this method for more information.
388
+ """
389
+ return self.tokenizer.batch_decode(*args, **kwargs)
390
+
391
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
392
+ def decode(self, *args, **kwargs):
393
+ """
394
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
395
+ the docstring of this method for more information.
396
+ """
397
+ return self.tokenizer.decode(*args, **kwargs)
398
+
399
+ def post_process_generation(self, text, cleanup_and_extract=True):
400
+ caption = text.split(self.eoi_token)[-1]
401
+ if cleanup_and_extract:
402
+ return clean_text_and_extract_entities_with_bboxes(caption)
403
+ return caption
404
+
405
+ @property
406
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
407
+ def model_input_names(self):
408
+ tokenizer_input_names = self.tokenizer.model_input_names
409
+ image_processor_input_names = self.image_processor.model_input_names
410
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
411
+
412
+ def _insert_patch_index_tokens(self, text: str, bboxes: Union[List[Tuple[int]], List[Tuple[float]]]) -> str:
413
+ if bboxes is None or len(bboxes) == 0:
414
+ return text
415
+
416
+ matched_phrases = list(re.finditer(r"<phrase>.+?</phrase>", string=text))
417
+ if len(matched_phrases) != len(bboxes):
418
+ raise ValueError(
419
+ f"The number of elements in `bboxes` should be the same as the number of `<phrase> ... </phrase>` pairs in `text`. Got {len(matched_phrases)} vs. {len(bboxes)} instead."
420
+ )
421
+
422
+ # insert each object's patch index tokens right after
423
+ # the corresponding `<phrase> ... </phrase>` pair
424
+ curr_pos = 0
425
+ buffer = []
426
+ for matched, bbox in zip(matched_phrases, bboxes):
427
+ _, end = matched.span()
428
+ buffer.append(text[curr_pos:end])
429
+ curr_pos = end
430
+ # A phrase without bbox
431
+ if bbox is None:
432
+ continue
433
+ # A phrase with a single bbox
434
+ if isinstance(bbox, tuple):
435
+ bbox = [bbox]
436
+ patch_index_strings = []
437
+ # A phrase could have multiple bboxes
438
+ if not all(box is not None for box in bbox):
439
+ raise ValueError(
440
+ "The multiple bounding boxes for a single phrase should not contain any `None` value."
441
+ )
442
+ for box in bbox:
443
+ patch_index_1, patch_index_2 = self._convert_bbox_to_patch_index_tokens(box)
444
+ patch_index_strings.append(f"{patch_index_1} {patch_index_2}")
445
+ # `bbox` being an empty list
446
+ if len(patch_index_strings) == 0:
447
+ continue
448
+ position_str = " </delimiter_of_multi_objects/> ".join(patch_index_strings)
449
+ buffer.append(f"<object> {position_str} </object>")
450
+ # remaining
451
+ if curr_pos < len(text):
452
+ buffer.append(text[curr_pos:])
453
+
454
+ text = "".join(buffer)
455
+ return text
456
+
457
+ def _convert_bbox_to_patch_index_tokens(
458
+ self, bbox: Union[Tuple[int, int], Tuple[float, float, float, float]]
459
+ ) -> Tuple[str, str]:
460
+ # already computed patch indices
461
+ if len(bbox) == 2:
462
+ idx_1, idx_2 = bbox
463
+ # bbox specified with (normalized) coordinates
464
+ else:
465
+ # derive `num_patches_per_side` from `self.num_patch_index_tokens`
466
+ num_patches_per_side = int(math.sqrt(self.num_patch_index_tokens))
467
+ idx_1, idx_2 = coordinate_to_patch_index(bbox, num_patches_per_side)
468
+
469
+ token_1 = f"<patch_index_{str(idx_1).zfill(4)}>"
470
+ token_2 = f"<patch_index_{str(idx_2).zfill(4)}>"
471
+
472
+ return token_1, token_2
473
+
474
+
475
+ def coordinate_to_patch_index(bbox: Tuple[float, float, float, float], num_patches_per_side: int) -> Tuple[int, int]:
476
+ """Convert a bounding box to a pair of patch indices.
477
+
478
+ Args:
479
+ bbox (`Tuple[float, float, float, float]`):
480
+ The 4 coordinates of the bounding box, with the format being (x1, y1, x2, y2) specifying the upper-left and
481
+ lower-right corners of the box. It should have x2 > x1 and y2 > y1.
482
+ num_patches_per_side (`int`): the number of patches along each side.
483
+
484
+ Returns:
485
+ `Tuple[int, int]`: A pair of patch indices representing the upper-left patch and lower-right patch.
486
+ """
487
+ (x1, y1, x2, y2) = bbox
488
+
489
+ if not (x2 > x1 and y2 > y1):
490
+ raise ValueError("The coordinates in `bbox` should be `(x1, y1, x2, y2)` with `x2 > x1` and `y2 > y1`.")
491
+
492
+ ul_x = math.floor(x1 * num_patches_per_side)
493
+ ul_y = math.floor(y1 * num_patches_per_side)
494
+
495
+ lr_x = math.ceil(x2 * num_patches_per_side - 1)
496
+ lr_y = math.ceil(y2 * num_patches_per_side - 1)
497
+
498
+ ul_idx = ul_y * num_patches_per_side + ul_x
499
+ lr_idx = lr_y * num_patches_per_side + lr_x
500
+
501
+ return ul_idx, lr_idx
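+
+ # Worked example (illustrative, with num_patches_per_side=32 as in the released checkpoints):
+ # bbox = (0.390625, 0.046875, 0.984375, 0.828125)
+ # ul_x = floor(0.390625 * 32) = 12, ul_y = floor(0.046875 * 32) = 1 -> ul_idx = 1 * 32 + 12 = 44
+ # lr_x = ceil(0.984375 * 32 - 1) = 31, lr_y = ceil(0.828125 * 32 - 1) = 26 -> lr_idx = 26 * 32 + 31 = 863
+ # i.e. the pair rendered as <patch_index_0044><patch_index_0863> in the snowman examples.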
502
+
503
+
504
+ # copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L35C1-L75C38
505
+ # (with format modifications)
506
+ def patch_index_to_coordinate(ul_idx: int, lr_idx: int, num_patches_per_side: int):
507
+ """
508
+ Given a grid of length `num_patches_per_side` and the indices of the upper-left and lower-right corners of a
509
+ bounding box, returns the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
510
+
511
+ Args:
512
+ ul_idx (`int`): the index of the grid cell that corresponds to the upper-left corner of the bounding box.
513
+ lr_idx (`int`): the index of the grid cell that corresponds to the lower-right corner of the bounding box.
514
+ num_patches_per_side (`int`): the number of patches along each side.
515
+
516
+ Returns:
517
+ `Tuple[float]`: the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
518
+ """
519
+ # Compute the size of each cell in the grid
520
+ cell_size = 1.0 / num_patches_per_side
521
+
522
+ # Compute the x and y indices of the upper-left and lower-right corners of the bounding box
523
+ ul_x = ul_idx % num_patches_per_side
524
+ ul_y = ul_idx // num_patches_per_side
525
+
526
+ lr_x = lr_idx % num_patches_per_side
527
+ lr_y = lr_idx // num_patches_per_side
528
+
529
+ # Compute the normalized coordinates of the bounding box
530
+ # a single patch, or two patches sharing a row or column, snap to the cell corners
531
+ if ul_idx == lr_idx or ul_x == lr_x or ul_y == lr_y:
532
+ x1 = ul_x * cell_size
533
+ y1 = ul_y * cell_size
534
+ x2 = lr_x * cell_size + cell_size
535
+ y2 = lr_y * cell_size + cell_size
540
+ else:
541
+ x1 = ul_x * cell_size + cell_size / 2
542
+ y1 = ul_y * cell_size + cell_size / 2
543
+ x2 = lr_x * cell_size + cell_size / 2
544
+ y2 = lr_y * cell_size + cell_size / 2
545
+
546
+ return x1, y1, x2, y2
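+
+ # Worked example (illustrative, num_patches_per_side=32): ul_idx=44, lr_idx=863
+ # ul_x = 44 % 32 = 12, ul_y = 44 // 32 = 1; lr_x = 863 % 32 = 31, lr_y = 863 // 32 = 26
+ # distinct rows and columns, so the `else` branch uses cell centers:
+ # x1 = 12/32 + 1/64 = 0.390625, y1 = 1/32 + 1/64 = 0.046875
+ # x2 = 31/32 + 1/64 = 0.984375, y2 = 26/32 + 1/64 = 0.828125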
547
+
548
+
549
+ # copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L4-L33
550
+ # (with format modifications)
551
+ def extract_entities_with_patch_indices(text):
552
+ """Extract entities contained in `text`. The bounding boxes are given in the form of patch indices.
553
+
554
+ This function is only intended to be used within `clean_text_and_extract_entities_with_bboxes`, where further
555
+ processing happens, including conversion to normalized coordinates and whitespace cleanup.
556
+
557
+ Examples:
558
+
559
+ ```python
560
+ >>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
561
+ >>> entities = extract_entities_with_patch_indices(text)
562
+ >>> entities
563
+ [(' a snowman', (31, 41), [(44, 863)]), (' a fire', (130, 137), [(5, 911)])]
564
+ ```"""
565
+ # The regular expression pattern for matching the required formats
566
+ pattern = r"(?:(<phrase>([^<]+)</phrase>))?<object>((?:<patch_index_\d+><patch_index_\d+></delimiter_of_multi_objects/>)*<patch_index_\d+><patch_index_\d+>)</object>"
567
+
568
+ # Find all matches in the given string
569
+ matches = re.finditer(pattern, text)
570
+
571
+ # Initialize an empty list to store the valid patch_index combinations
572
+ entities_with_patch_indices = []
573
+
574
+ for match in matches:
575
+ # span of a `phrase` that is between <phrase> and </phrase>
576
+ span = match.span(2)
577
+ phrase_tag, phrase, match_content = match.groups()
578
+ if not phrase_tag:
579
+ phrase = None
580
+ # We take the starting position of `<object>`
581
+ span = (match.span(0)[0], match.span(0)[0])
582
+
583
+ # Split the match_content by the delimiter to get individual patch_index pairs
584
+ patch_index_pairs = match_content.split("</delimiter_of_multi_objects/>")
585
+
586
+ entity_bboxes = []
587
+ for pair in patch_index_pairs:
588
+ # Extract the xxxx and yyyy values from the patch_index pair
589
+ x = re.search(r"<patch_index_(\d+)>", pair)
590
+ y = re.search(r"<patch_index_(\d+)>", pair[1:])
591
+
592
+ if x and y:
593
+ if phrase:
594
+ entity_bboxes.append((int(x.group(1)), int(y.group(1))))
595
+ else:
596
+ entity_bboxes.append((int(x.group(1)), int(y.group(1))))
597
+
598
+ if phrase:
599
+ entities_with_patch_indices.append((phrase, span, entity_bboxes))
600
+ else:
601
+ for bbox in entity_bboxes:
602
+ # fake entity name
603
+ entity = f"<patch_index_{bbox[0]}><patch_index_{bbox[1]}>"
604
+ entities_with_patch_indices.append((entity, span, [bbox]))
605
+
606
+ return entities_with_patch_indices
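When an `<object>` block has no preceding `<phrase>` tag, the function synthesizes an entity name from the patch indices (note that `int()` drops the zero padding) and records an empty span at the position of `<object>`. A minimal sketch, run in the same module:

```python
text = "An image of<object><patch_index_0005><patch_index_0911></object>."
print(extract_entities_with_patch_indices(text))
# [('<patch_index_5><patch_index_911>', (11, 11), [(5, 911)])]
```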
607
+
608
+
609
+ def adjust_entity_positions(entity, text):
610
+ """Adjust the positions of the entities in `text` to be relative to the text with special fields removed."""
611
+ entity_name, (start, end) = entity
612
+ # compute the length of the string with special fields (tag tokens, patch index tokens, etc.) removed
613
+ adjusted_start = len(re.sub("<.*?>", "", text[:start]))
614
+ adjusted_end = len(re.sub("<.*?>", "", text[:end]))
615
+ adjusted_entity = (entity_name, (adjusted_start, adjusted_end))
616
+ return adjusted_entity
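For instance, in the snowman example above, the span (31, 41) of ` a snowman` in the tagged string maps to (12, 22) once every `<...>` field before it is removed; `_cleanup_spaces` below then trims it to (12, 21). Run in the same module:

```python
text = (
    "<grounding> An image of<phrase> a snowman</phrase>"
    "<object><patch_index_0044><patch_index_0863></object>."
)
print(adjust_entity_positions((" a snowman", (31, 41)), text))
# (' a snowman', (12, 22))
```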
617
+
618
+
619
+ def _cleanup_spaces(text, entities):
620
+ """Remove the spaces around the text and the entities in it."""
621
+ new_text = text.strip()
622
+ leading_spaces = len(text) - len(text.lstrip())
623
+
624
+ new_entities = []
625
+ for entity_name, (start, end), bboxes in entities:
626
+ entity_name_leading_spaces = len(entity_name) - len(entity_name.lstrip())
627
+ entity_name_trailing_spaces = len(entity_name) - len(entity_name.rstrip())
628
+
629
+ start = start - leading_spaces + entity_name_leading_spaces
630
+ end = end - leading_spaces - entity_name_trailing_spaces
631
+ entity_name = entity_name.strip()
632
+
633
+ new_entities.append((entity_name, (start, end), bboxes))
634
+
635
+ return new_text, new_entities
636
+
637
+
638
+ # copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L77-L87
639
+ # (with format modifications)
640
+ def clean_text_and_extract_entities_with_bboxes(text, num_patches_per_side=32):
641
+ """Remove the tag tokens from `text`, extract entities in it with some cleaning up of white characters.
642
+
643
+ Examples:
644
+
645
+ ```python
646
+ >>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
647
+ >>> clean_text, entities = clean_text_and_extract_entities_with_bboxes(text)
648
+ >>> clean_text
649
+ 'An image of a snowman warming himself by a fire.'
650
+
651
+ >>> entities
652
+ [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])]
653
+ ```"""
654
+ # remove special fields (tag tokens, patch index tokens, etc.)
655
+ processed_text = re.sub("<.*?>", "", text)
656
+
657
+ entities_with_patch_indices = extract_entities_with_patch_indices(text)
658
+ entities = []
659
+ for item in entities_with_patch_indices:
660
+ entity, bboxes = item[0:2], item[2]
661
+ adjusted_entity = adjust_entity_positions(entity, text)
662
+ bboxes_in_coords = [patch_index_to_coordinate(bbox[0], bbox[1], num_patches_per_side) for bbox in bboxes]
663
+
664
+ entities.append(adjusted_entity + (bboxes_in_coords,))
665
+
666
+ return _cleanup_spaces(processed_text, entities)
venv/lib/python3.10/site-packages/transformers/models/mask2former/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mask2former": [
21
+ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "Mask2FormerConfig",
23
+ ],
24
+ }
25
+
26
+ try:
27
+ if not is_vision_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_mask2former"] = [
41
+ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
42
+ "Mask2FormerForUniversalSegmentation",
43
+ "Mask2FormerModel",
44
+ "Mask2FormerPreTrainedModel",
45
+ ]
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
49
+
50
+ try:
51
+ if not is_vision_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .image_processing_mask2former import Mask2FormerImageProcessor
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_mask2former import (
65
+ MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
66
+ Mask2FormerForUniversalSegmentation,
67
+ Mask2FormerModel,
68
+ Mask2FormerPreTrainedModel,
69
+ )
70
+
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
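With the `_LazyModule` swap above, importing from this package stays cheap: submodules such as `modeling_mask2former` are only imported on first attribute access. A minimal sketch, assuming `transformers` is installed:

```python
from transformers.models.mask2former import Mask2FormerConfig  # lazily imports configuration_mask2former

config = Mask2FormerConfig()
print(config.model_type)  # mask2former
```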
venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB)
venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/configuration_mask2former.cpython-310.pyc ADDED
Binary file (10.7 kB)
venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/convert_mask2former_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (26.8 kB)
venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/image_processing_mask2former.cpython-310.pyc ADDED
Binary file (39.8 kB)
venv/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/modeling_mask2former.cpython-310.pyc ADDED
Binary file (88.7 kB)
venv/lib/python3.10/site-packages/transformers/models/mask2former/configuration_mask2former.py ADDED
@@ -0,0 +1,255 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc.and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Mask2Former model configuration"""
16
+ from typing import Dict, List, Optional
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+ from ..auto import CONFIG_MAPPING
21
+ from ..deprecated._archive_maps import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class Mask2FormerConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`Mask2FormerModel`]. It is used to instantiate a
30
+ Mask2Former model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the Mask2Former
32
+ [facebook/mask2former-swin-small-coco-instance](https://huggingface.co/facebook/mask2former-swin-small-coco-instance)
33
+ architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Currently, Mask2Former only supports the [Swin Transformer](swin) as backbone.
39
+
40
+ Args:
41
+ backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `SwinConfig()`):
42
+ The configuration of the backbone model. If unset, the configuration corresponding to
43
+ `swin-base-patch4-window12-384` will be used.
44
+ backbone (`str`, *optional*):
45
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
46
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
47
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
48
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
49
+ Whether to use pretrained weights for the backbone.
50
+ use_timm_backbone (`bool`, *optional*, defaults to `False`):
51
+ Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
52
+ library.
53
+ backbone_kwargs (`dict`, *optional*):
54
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
55
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
56
+ feature_size (`int`, *optional*, defaults to 256):
57
+ The features (channels) of the resulting feature maps.
58
+ mask_feature_size (`int`, *optional*, defaults to 256):
59
+ The masks' feature size; this value is also used to specify the Feature Pyramid Network features'
60
+ size.
61
+ hidden_dim (`int`, *optional*, defaults to 256):
62
+ Dimensionality of the encoder layers.
63
+ encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
64
+ Dimension of feedforward network for deformable detr encoder used as part of pixel decoder.
65
+ encoder_layers (`int`, *optional*, defaults to 6):
66
+ Number of layers in the deformable detr encoder used as part of pixel decoder.
67
+ decoder_layers (`int`, *optional*, defaults to 10):
68
+ Number of layers in the Transformer decoder.
69
+ num_attention_heads (`int`, *optional*, defaults to 8):
70
+ Number of attention heads for each attention layer.
71
+ dropout (`float`, *optional*, defaults to 0.0):
72
+ The dropout probability for all fully connected layers in the embeddings and encoder.
73
+ dim_feedforward (`int`, *optional*, defaults to 2048):
74
+ Feature dimension in feedforward network for transformer decoder.
75
+ pre_norm (`bool`, *optional*, defaults to `False`):
76
+ Whether to use pre-LayerNorm or not for transformer decoder.
77
+ enforce_input_projection (`bool`, *optional*, defaults to `False`):
78
+ Whether to add an input projection 1x1 convolution even if the input channels and hidden dim are identical
79
+ in the Transformer decoder.
80
+ common_stride (`int`, *optional*, defaults to 4):
81
+ Parameter used for determining number of FPN levels used as part of pixel decoder.
82
+ ignore_value (`int`, *optional*, defaults to 255):
83
+ Category id to be ignored during training.
84
+ num_queries (`int`, *optional*, defaults to 100):
85
+ Number of queries for the decoder.
86
+ no_object_weight (`float`, *optional*, defaults to 0.1):
87
+ The weight to apply to the null (no object) class.
88
+ class_weight (`float`, *optional*, defaults to 2.0):
89
+ The weight for the cross entropy loss.
90
+ mask_weight (`float`, *optional*, defaults to 5.0):
91
+ The weight for the mask loss.
92
+ dice_weight (`float`, *optional*, defaults to 5.0):
93
+ The weight for the dice loss.
94
+ train_num_points (`int`, *optional*, defaults to 12544):
95
+ Number of points used for sampling during loss calculation.
96
+ oversample_ratio (`float`, *optional*, defaults to 3.0):
97
+ Oversampling parameter used for calculating the number of sampled points.
98
+ importance_sample_ratio (`float`, *optional*, defaults to 0.75):
99
+ Ratio of points that are sampled via importance sampling.
100
+ init_std (`float`, *optional*, defaults to 0.02):
101
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
102
+ init_xavier_std (`float`, *optional*, defaults to 1.0):
103
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
104
+ use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
105
+ If `True` [`Mask2FormerForUniversalSegmentationOutput`] will contain the auxiliary losses computed using
106
+ the logits from each decoder's stage.
107
+ feature_strides (`List[int]`, *optional*, defaults to `[4, 8, 16, 32]`):
108
+ Feature strides corresponding to features generated from backbone network.
109
+ output_auxiliary_logits (`bool`, *optional*):
110
+ Should the model output its `auxiliary_logits` or not.
111
+
112
+ Examples:
113
+
114
+ ```python
115
+ >>> from transformers import Mask2FormerConfig, Mask2FormerModel
116
+
117
+ >>> # Initializing a Mask2Former facebook/mask2former-swin-small-coco-instance configuration
118
+ >>> configuration = Mask2FormerConfig()
119
+
120
+ >>> # Initializing a model (with random weights) from the facebook/mask2former-swin-small-coco-instance style configuration
121
+ >>> model = Mask2FormerModel(configuration)
122
+
123
+ >>> # Accessing the model configuration
124
+ >>> configuration = model.config
125
+ ```
126
+
127
+ """
128
+
129
+ model_type = "mask2former"
130
+ backbones_supported = ["swin"]
131
+ attribute_map = {"hidden_size": "hidden_dim"}
132
+
133
+ def __init__(
134
+ self,
135
+ backbone_config: Optional[Dict] = None,
136
+ feature_size: int = 256,
137
+ mask_feature_size: int = 256,
138
+ hidden_dim: int = 256,
139
+ encoder_feedforward_dim: int = 1024,
140
+ activation_function: str = "relu",
141
+ encoder_layers: int = 6,
142
+ decoder_layers: int = 10,
143
+ num_attention_heads: int = 8,
144
+ dropout: float = 0.0,
145
+ dim_feedforward: int = 2048,
146
+ pre_norm: bool = False,
147
+ enforce_input_projection: bool = False,
148
+ common_stride: int = 4,
149
+ ignore_value: int = 255,
150
+ num_queries: int = 100,
151
+ no_object_weight: float = 0.1,
152
+ class_weight: float = 2.0,
153
+ mask_weight: float = 5.0,
154
+ dice_weight: float = 5.0,
155
+ train_num_points: int = 12544,
156
+ oversample_ratio: float = 3.0,
157
+ importance_sample_ratio: float = 0.75,
158
+ init_std: float = 0.02,
159
+ init_xavier_std: float = 1.0,
160
+ use_auxiliary_loss: bool = True,
161
+ feature_strides: List[int] = [4, 8, 16, 32],
162
+ output_auxiliary_logits: bool = None,
163
+ backbone: Optional[str] = None,
164
+ use_pretrained_backbone: bool = False,
165
+ use_timm_backbone: bool = False,
166
+ backbone_kwargs: Optional[Dict] = None,
167
+ **kwargs,
168
+ ):
169
+ if use_pretrained_backbone:
170
+ raise ValueError("Pretrained backbones are not supported yet.")
171
+
172
+ if backbone_config is not None and backbone is not None:
173
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
174
+
175
+ if backbone_config is None and backbone is None:
176
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
177
+ backbone_config = CONFIG_MAPPING["swin"](
178
+ image_size=224,
179
+ in_channels=3,
180
+ patch_size=4,
181
+ embed_dim=96,
182
+ depths=[2, 2, 18, 2],
183
+ num_heads=[3, 6, 12, 24],
184
+ window_size=7,
185
+ drop_path_rate=0.3,
186
+ use_absolute_embeddings=False,
187
+ out_features=["stage1", "stage2", "stage3", "stage4"],
188
+ )
189
+
190
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
191
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
192
+
193
+ if isinstance(backbone_config, dict):
194
+ backbone_model_type = backbone_config.pop("model_type")
195
+ config_class = CONFIG_MAPPING[backbone_model_type]
196
+ backbone_config = config_class.from_dict(backbone_config)
197
+
198
+ # verify that the backbone is supported
199
+ if backbone_config is not None and backbone_config.model_type not in self.backbones_supported:
200
+ logger.warning_once(
201
+ f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
202
+ f"Supported model types: {','.join(self.backbones_supported)}"
203
+ )
204
+
205
+ self.backbone_config = backbone_config
206
+ self.feature_size = feature_size
207
+ self.mask_feature_size = mask_feature_size
208
+ self.hidden_dim = hidden_dim
209
+ self.encoder_feedforward_dim = encoder_feedforward_dim
210
+ self.activation_function = activation_function
211
+ self.encoder_layers = encoder_layers
212
+ self.decoder_layers = decoder_layers
213
+ self.num_attention_heads = num_attention_heads
214
+ self.dropout = dropout
215
+ self.dim_feedforward = dim_feedforward
216
+ self.pre_norm = pre_norm
217
+ self.enforce_input_projection = enforce_input_projection
218
+ self.common_stride = common_stride
219
+ self.ignore_value = ignore_value
220
+ self.num_queries = num_queries
221
+ self.no_object_weight = no_object_weight
222
+ self.class_weight = class_weight
223
+ self.mask_weight = mask_weight
224
+ self.dice_weight = dice_weight
225
+ self.train_num_points = train_num_points
226
+ self.oversample_ratio = oversample_ratio
227
+ self.importance_sample_ratio = importance_sample_ratio
228
+ self.init_std = init_std
229
+ self.init_xavier_std = init_xavier_std
230
+ self.use_auxiliary_loss = use_auxiliary_loss
231
+ self.feature_strides = feature_strides
232
+ self.output_auxiliary_logits = output_auxiliary_logits
233
+ self.num_hidden_layers = decoder_layers
234
+ self.backbone = backbone
235
+ self.use_pretrained_backbone = use_pretrained_backbone
236
+ self.use_timm_backbone = use_timm_backbone
237
+ self.backbone_kwargs = backbone_kwargs
238
+
239
+ super().__init__(**kwargs)
240
+
241
+ @classmethod
242
+ def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
243
+ """Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration.
244
+
245
+ Args:
246
+ backbone_config ([`PretrainedConfig`]):
247
+ The backbone configuration.
248
+
249
+ Returns:
250
+ [`Mask2FormerConfig`]: An instance of a configuration object
251
+ """
252
+ return cls(
253
+ backbone_config=backbone_config,
254
+ **kwargs,
255
+ )
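A short usage sketch of `from_backbone_config`, assuming `transformers` is installed; any extra kwargs are forwarded to the regular constructor:

```python
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = Mask2FormerConfig.from_backbone_config(backbone, num_queries=100)
print(config.backbone_config.model_type)  # swin
```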
venv/lib/python3.10/site-packages/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,1019 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import json
16
+ import sys
17
+ from argparse import ArgumentParser
18
+ from dataclasses import dataclass
19
+ from pathlib import Path
20
+ from pprint import pformat
21
+ from typing import Any, Dict, Iterator, List, Set, Tuple
22
+
23
+ import requests
24
+ import torch
25
+ import torchvision.transforms as T
26
+ from detectron2.checkpoint import DetectionCheckpointer
27
+ from detectron2.config import get_cfg
28
+ from detectron2.projects.deeplab import add_deeplab_config
29
+ from huggingface_hub import hf_hub_download
30
+ from PIL import Image
31
+ from torch import Tensor, nn
32
+
33
+ from transformers import (
34
+ Mask2FormerConfig,
35
+ Mask2FormerForUniversalSegmentation,
36
+ Mask2FormerImageProcessor,
37
+ Mask2FormerModel,
38
+ SwinConfig,
39
+ )
40
+ from transformers.models.mask2former.modeling_mask2former import (
41
+ Mask2FormerForUniversalSegmentationOutput,
42
+ Mask2FormerModelOutput,
43
+ )
44
+ from transformers.utils import logging
45
+
46
+
47
+ StateDict = Dict[str, Tensor]
48
+
49
+ logging.set_verbosity_info()
50
+ logger = logging.get_logger()
51
+
52
+ torch.manual_seed(0)
53
+
54
+
55
+ class TrackedStateDict:
56
+ def __init__(self, to_track: Dict):
57
+ """This class "tracks" a python dictionary by keeping track of which item is accessed.
58
+
59
+ Args:
60
+ to_track (Dict): The dictionary we wish to track
61
+ """
62
+ self.to_track = to_track
63
+ self._seen: Set[str] = set()
64
+
65
+ def __getitem__(self, key: str) -> Any:
66
+ return self.to_track[key]
67
+
68
+ def __setitem__(self, key: str, item: Any):
69
+ self._seen.add(key)
70
+ self.to_track[key] = item
71
+
72
+ def diff(self) -> List[str]:
73
+ """This method returns a set difference between the keys in the tracked state dict and the one we have access so far.
74
+ This is an effective method to check if we have update all the keys
75
+
76
+ Returns:
77
+ List[str]: List of keys not yet updated
78
+ """
79
+ return set(self.to_track.keys()) - self._seen
80
+
81
+ def copy(self) -> Dict:
82
+ # proxy the call to the internal dictionary
83
+ return self.to_track.copy()
84
+
85
+
86
+ # We will verify our results on an image of cute cats
87
+ def prepare_img():
88
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
89
+ img_data = requests.get(url, stream=True).raw
90
+ im = Image.open(img_data)
91
+ return im
92
+
93
+
94
+ @dataclass
95
+ class Args:
96
+ """Fake command line arguments needed by mask2former/detectron implementation"""
97
+
98
+ config_file: str
99
+
100
+
101
+ def setup_cfg(args: Args):
102
+ # load config from file and command-line arguments
103
+ cfg = get_cfg()
104
+ add_deeplab_config(cfg)
105
+ add_maskformer2_config(cfg)
106
+ cfg.merge_from_file(args.config_file)
107
+ cfg.freeze()
108
+ return cfg
109
+
110
+
111
+ class OriginalMask2FormerConfigToOursConverter:
112
+ def __call__(self, original_config: object) -> Mask2FormerConfig:
113
+ model = original_config.MODEL
114
+
115
+ repo_id = "huggingface/label-files"
116
+ if model.SEM_SEG_HEAD.NUM_CLASSES == 847:
117
+ filename = "mask2former-ade20k-full-id2label.json"
118
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 150:
119
+ filename = "ade20k-id2label.json"
120
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 80:
121
+ filename = "coco-detection-mmdet-id2label.json"
122
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 171:
123
+ filename = "mask2former-coco-stuff-id2label.json"
124
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 133:
125
+ filename = "coco-panoptic-id2label.json"
126
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 19:
127
+ filename = "cityscapes-id2label.json"
128
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 8:
129
+ filename = "cityscapes-instance-id2label.json"
130
+ elif model.SEM_SEG_HEAD.NUM_CLASSES == 65:
131
+ filename = "mapillary-vistas-id2label.json"
132
+
133
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
134
+ id2label = {int(k): v for k, v in id2label.items()}
135
+ label2id = {label: idx for idx, label in id2label.items()}
136
+
137
+ if model.SWIN.EMBED_DIM == 96:
138
+ backbone_config = SwinConfig.from_pretrained(
139
+ "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
140
+ )
141
+ elif model.SWIN.EMBED_DIM == 128:
142
+ backbone_config = SwinConfig(
143
+ embed_dim=128,
144
+ window_size=12,
145
+ depths=(2, 2, 18, 2),
146
+ num_heads=(4, 8, 16, 32),
147
+ out_features=["stage1", "stage2", "stage3", "stage4"],
148
+ )
149
+
150
+ elif model.SWIN.EMBED_DIM == 192:
151
+ backbone_config = SwinConfig.from_pretrained(
152
+ "microsoft/swin-large-patch4-window12-384", out_features=["stage1", "stage2", "stage3", "stage4"]
153
+ )
154
+ else:
155
+ raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!")
156
+
157
+ backbone_config.drop_path_rate = model.SWIN.DROP_PATH_RATE
158
+ backbone_config.attention_probs_dropout_prob = model.SWIN.ATTN_DROP_RATE
159
+ backbone_config.depths = model.SWIN.DEPTHS
160
+
161
+ config: Mask2FormerConfig = Mask2FormerConfig(
162
+ ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE,
163
+ num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
164
+ num_queries=model.MASK_FORMER.NUM_OBJECT_QUERIES,
165
+ no_object_weight=model.MASK_FORMER.NO_OBJECT_WEIGHT,
166
+ class_weight=model.MASK_FORMER.CLASS_WEIGHT,
167
+ mask_weight=model.MASK_FORMER.MASK_WEIGHT,
168
+ dice_weight=model.MASK_FORMER.DICE_WEIGHT,
169
+ train_num_points=model.MASK_FORMER.TRAIN_NUM_POINTS,
170
+ oversample_ratio=model.MASK_FORMER.OVERSAMPLE_RATIO,
171
+ importance_sample_ratio=model.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
172
+ init_std=0.02,
173
+ init_xavier_std=1.0,
174
+ use_auxiliary_loss=model.MASK_FORMER.DEEP_SUPERVISION,
175
+ feature_strides=[4, 8, 16, 32],
176
+ backbone_config=backbone_config,
177
+ id2label=id2label,
178
+ label2id=label2id,
179
+ feature_size=model.SEM_SEG_HEAD.CONVS_DIM,
180
+ mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM,
181
+ hidden_dim=model.MASK_FORMER.HIDDEN_DIM,
182
+ encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS,
183
+ encoder_feedforward_dim=1024,
184
+ decoder_layers=model.MASK_FORMER.DEC_LAYERS,
185
+ num_attention_heads=model.MASK_FORMER.NHEADS,
186
+ dropout=model.MASK_FORMER.DROPOUT,
187
+ dim_feedforward=model.MASK_FORMER.DIM_FEEDFORWARD,
188
+ pre_norm=model.MASK_FORMER.PRE_NORM,
189
+ enforce_input_proj=model.MASK_FORMER.ENFORCE_INPUT_PROJ,
190
+ common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE,
191
+ )
192
+ return config
193
+
194
+
195
+ class OriginalMask2FormerConfigToImageProcessorConverter:
196
+ def __call__(self, original_config: object) -> Mask2FormerImageProcessor:
197
+ model = original_config.MODEL
198
+ model_input = original_config.INPUT
199
+
200
+ return Mask2FormerImageProcessor(
201
+ image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
202
+ image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
203
+ size=model_input.MIN_SIZE_TEST,
204
+ max_size=model_input.MAX_SIZE_TEST,
205
+ num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
206
+ ignore_index=model.SEM_SEG_HEAD.IGNORE_VALUE,
207
+ size_divisibility=32,
208
+ )
209
+
210
+
211
+ class OriginalMask2FormerCheckpointToOursConverter:
212
+ def __init__(self, original_model: nn.Module, config: Mask2FormerConfig):
213
+ self.original_model = original_model
214
+ self.config = config
215
+
216
+ def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
217
+ for src_key, dst_key in renamed_keys:
218
+ dst_state_dict[dst_key] = src_state_dict.pop(src_key)
219
+
220
+ def replace_maskformer_swin_backbone(
221
+ self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig
222
+ ):
223
+ dst_prefix: str = "pixel_level_module.encoder"
224
+ src_prefix: str = "backbone"
225
+
226
+ renamed_keys = [
227
+ (
228
+ f"{src_prefix}.patch_embed.proj.weight",
229
+ f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight",
230
+ ),
231
+ (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"),
232
+ (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"),
233
+ (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"),
234
+ ]
235
+ num_layers = len(config.backbone_config.depths)
236
+ for layer_idx in range(num_layers):
237
+ for block_idx in range(config.backbone_config.depths[layer_idx]):
238
+ renamed_keys.extend(
239
+ [ # src, dst
240
+ (
241
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
242
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
243
+ ),
244
+ (
245
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
246
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
247
+ ),
248
+ (
249
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
250
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
251
+ ),
252
+ ]
253
+ )
254
+ # now we need to handle the attentions
255
+ # read in weights + bias of input projection layer of cross-attention
256
+
257
+ src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
258
+ src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
259
+
260
+ size = src_att_weight.shape[0]
261
+ offset = size // 3
262
+ dst_state_dict[
263
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
264
+ ] = src_att_weight[:offset, :]
265
+ dst_state_dict[
266
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
267
+ ] = src_att_bias[:offset]
268
+
269
+ dst_state_dict[
270
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
271
+ ] = src_att_weight[offset : offset * 2, :]
272
+ dst_state_dict[
273
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
274
+ ] = src_att_bias[offset : offset * 2]
275
+
276
+ dst_state_dict[
277
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
278
+ ] = src_att_weight[-offset:, :]
279
+ dst_state_dict[
280
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
281
+ ] = src_att_bias[-offset:]
282
+
283
+ # let's pop them
284
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
285
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
286
+ # proj
287
+ renamed_keys.extend(
288
+ [
289
+ (
290
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
291
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
292
+ ),
293
+ (
294
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
295
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
296
+ ),
297
+ ]
298
+ )
299
+
300
+ # second norm
301
+ renamed_keys.extend(
302
+ [
303
+ (
304
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
305
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
306
+ ),
307
+ (
308
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
309
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
310
+ ),
311
+ ]
312
+ )
313
+
314
+ # mlp
315
+ renamed_keys.extend(
316
+ [
317
+ (
318
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
319
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
320
+ ),
321
+ (
322
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
323
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
324
+ ),
325
+ (
326
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
327
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
328
+ ),
329
+ (
330
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
331
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
332
+ ),
333
+ ]
334
+ )
335
+
336
+ renamed_keys.extend(
337
+ [
338
+ (
339
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
340
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
341
+ )
342
+ ]
343
+ )
344
+
345
+ if layer_idx < num_layers - 1:
346
+ # patch merging
347
+ renamed_keys.extend(
348
+ [
349
+ (
350
+ f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
351
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight",
352
+ ),
353
+ (
354
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
355
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight",
356
+ ),
357
+ (
358
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
359
+ f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias",
360
+ ),
361
+ ]
362
+ )
363
+
364
+ # hidden states norms
365
+ renamed_keys.extend(
366
+ [
367
+ (
368
+ f"{src_prefix}.norm{layer_idx}.weight",
369
+ f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight",
370
+ ),
371
+ (
372
+ f"{src_prefix}.norm{layer_idx}.bias",
373
+ f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias",
374
+ ),
375
+ ]
376
+ )
377
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
378
+
379
+ def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig):
380
+ dst_prefix: str = "pixel_level_module.encoder"
381
+ src_prefix: str = "backbone"
382
+
383
+ renamed_keys = [
384
+ (
385
+ f"{src_prefix}.patch_embed.proj.weight",
386
+ f"{dst_prefix}.embeddings.patch_embeddings.projection.weight",
387
+ ),
388
+ (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"),
389
+ (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"),
390
+ (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"),
391
+ ]
392
+
393
+ for layer_idx in range(len(config.backbone_config.depths)):
394
+ for block_idx in range(config.backbone_config.depths[layer_idx]):
395
+ renamed_keys.extend(
396
+ [ # src, dst
397
+ (
398
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
399
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
400
+ ),
401
+ (
402
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
403
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
404
+ ),
405
+ (
406
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
407
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
408
+ ),
409
+ ]
410
+ )
411
+ # now we need to handle the attentions
412
+ # read in weights + bias of input projection layer of cross-attention
413
+
414
+ src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
415
+ src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
416
+
417
+ size = src_att_weight.shape[0]
418
+ offset = size // 3
419
+ dst_state_dict[
420
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
421
+ ] = src_att_weight[:offset, :]
422
+ dst_state_dict[
423
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
424
+ ] = src_att_bias[:offset]
425
+
426
+ dst_state_dict[
427
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
428
+ ] = src_att_weight[offset : offset * 2, :]
429
+ dst_state_dict[
430
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
431
+ ] = src_att_bias[offset : offset * 2]
432
+
433
+ dst_state_dict[
434
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
435
+ ] = src_att_weight[-offset:, :]
436
+ dst_state_dict[
437
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
438
+ ] = src_att_bias[-offset:]
439
+
440
+ # let's pop them
441
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
442
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
443
+ # proj
444
+ renamed_keys.extend(
445
+ [
446
+ (
447
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
448
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
449
+ ),
450
+ (
451
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
452
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
453
+ ),
454
+ ]
455
+ )
456
+
457
+ # second norm
458
+ renamed_keys.extend(
459
+ [
460
+ (
461
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
462
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
463
+ ),
464
+ (
465
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
466
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
467
+ ),
468
+ ]
469
+ )
470
+
471
+ # mlp
472
+ renamed_keys.extend(
473
+ [
474
+ (
475
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
476
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
477
+ ),
478
+ (
479
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
480
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
481
+ ),
482
+ (
483
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
484
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
485
+ ),
486
+ (
487
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
488
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
489
+ ),
490
+ ]
491
+ )
492
+
493
+ renamed_keys.extend(
494
+ [
495
+ (
496
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
497
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
498
+ )
499
+ ]
500
+ )
501
+
502
+ if layer_idx < 3:
503
+ # patch merging
504
+ renamed_keys.extend(
505
+ [
506
+ (
507
+ f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
508
+ f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight",
509
+ ),
510
+ (
511
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
512
+ f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight",
513
+ ),
514
+ (
515
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
516
+ f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias",
517
+ ),
518
+ ]
519
+ )
520
+
521
+ # hidden states norms
522
+ renamed_keys.extend(
523
+ [
524
+ (
525
+ f"{src_prefix}.norm{layer_idx}.weight",
526
+ f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight",
527
+ ),
528
+ (
529
+ f"{src_prefix}.norm{layer_idx}.bias",
530
+ f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias",
531
+ ),
532
+ ]
533
+ )
534
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
535
+
536
+ # Backbone + Pixel Decoder
537
+ def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
538
+ dst_prefix: str = "pixel_level_module.decoder"
539
+ src_prefix: str = "sem_seg_head.pixel_decoder"
540
+
541
+ self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)
542
+
543
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
544
+ return [
545
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
546
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
547
+ ]
548
+
549
+ def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
550
+ self_attn_keys = []
551
+ self_attn_keys.extend(
552
+ rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights")
553
+ )
554
+ self_attn_keys.extend(
555
+ rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj")
556
+ )
557
+ self_attn_keys.extend(
558
+ rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets")
559
+ )
560
+ self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj"))
561
+
562
+ return self_attn_keys
563
+
564
+ def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
565
+ encoder_keys = []
566
+ encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1"))
567
+ encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2"))
568
+ encoder_keys.extend(
569
+ rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm")
570
+ )
571
+ encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm"))
572
+ encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn"))
573
+
574
+ return encoder_keys
575
+
576
+ # convolution layer for final features
577
+ renamed_keys = [
578
+ (f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"),
579
+ (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"),
580
+ (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"),
581
+ ]
582
+
583
+ renamed_keys.extend(
584
+ [
585
+ (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"),
586
+ (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"),
587
+ (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"),
588
+ ]
589
+ )
590
+
591
+ # proj layers
592
+ for i in range(3):
593
+ for j in range(2):
594
+ renamed_keys.extend(
595
+ [
596
+ (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"),
597
+ (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"),
598
+ ]
599
+ )
600
+
601
+ renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")])
602
+
603
+ # layers
604
+ for layer_idx in range(self.config.encoder_layers):
605
+ renamed_keys.extend(
606
+ rename_keys_for_encoder_layer(
607
+ f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}"
608
+ )
609
+ )
610
+
611
+ # proj
612
+ renamed_keys.extend(
613
+ [
614
+ (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
615
+ (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
616
+ ]
617
+ )
618
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
619
+
620
+ # Transformer Decoder
621
+ def rename_keys_in_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
622
+ dst_prefix: str = "transformer_module.decoder"
623
+ src_prefix: str = "sem_seg_head.predictor"
624
+
625
+ rename_keys = []
626
+ for i in range(self.config.decoder_layers - 1):
627
+ rename_keys.append(
628
+ (
629
+ f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.weight",
630
+ f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight",
631
+ )
632
+ )
633
+ rename_keys.append(
634
+ (
635
+ f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.bias",
636
+ f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias",
637
+ )
638
+ )
639
+
640
+ rename_keys.append(
641
+ (
642
+ f"{src_prefix}.transformer_self_attention_layers.{i}.norm.weight",
643
+ f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight",
644
+ )
645
+ )
646
+ rename_keys.append(
647
+ (
648
+ f"{src_prefix}.transformer_self_attention_layers.{i}.norm.bias",
649
+ f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias",
650
+ )
651
+ )
652
+
653
+ rename_keys.append(
654
+ (
655
+ f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_weight",
656
+ f"{dst_prefix}.layers.{i}.cross_attn.in_proj_weight",
657
+ )
658
+ )
659
+ rename_keys.append(
660
+ (
661
+ f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_bias",
662
+ f"{dst_prefix}.layers.{i}.cross_attn.in_proj_bias",
663
+ )
664
+ )
665
+ rename_keys.append(
666
+ (
667
+ f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.weight",
668
+ f"{dst_prefix}.layers.{i}.cross_attn.out_proj.weight",
669
+ )
670
+ )
671
+ rename_keys.append(
672
+ (
673
+ f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.bias",
674
+ f"{dst_prefix}.layers.{i}.cross_attn.out_proj.bias",
675
+ )
676
+ )
677
+
678
+ rename_keys.append(
679
+ (
680
+ f"{src_prefix}.transformer_cross_attention_layers.{i}.norm.weight",
681
+ f"{dst_prefix}.layers.{i}.cross_attn_layer_norm.weight",
682
+ )
683
+ )
684
+ rename_keys.append(
685
+ (
686
+ f"{src_prefix}.transformer_cross_attention_layers.{i}.norm.bias",
687
+ f"{dst_prefix}.layers.{i}.cross_attn_layer_norm.bias",
688
+ )
689
+ )
690
+
691
+ rename_keys.append(
692
+ (f"{src_prefix}.transformer_ffn_layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight")
693
+ )
694
+ rename_keys.append(
695
+ (f"{src_prefix}.transformer_ffn_layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias")
696
+ )
697
+ rename_keys.append(
698
+ (f"{src_prefix}.transformer_ffn_layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight")
699
+ )
700
+ rename_keys.append(
701
+ (f"{src_prefix}.transformer_ffn_layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias")
702
+ )
703
+ rename_keys.append(
704
+ (
705
+ f"{src_prefix}.transformer_ffn_layers.{i}.norm.weight",
706
+ f"{dst_prefix}.layers.{i}.final_layer_norm.weight",
707
+ )
708
+ )
709
+ rename_keys.append(
710
+ (
711
+ f"{src_prefix}.transformer_ffn_layers.{i}.norm.bias",
712
+ f"{dst_prefix}.layers.{i}.final_layer_norm.bias",
713
+ )
714
+ )
715
+
716
+ return rename_keys
717
+
718
+ def replace_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
719
+ dst_prefix: str = "transformer_module.decoder"
720
+ src_prefix: str = "sem_seg_head.predictor"
721
+
722
+ renamed_keys = self.rename_keys_in_masked_attention_decoder(dst_state_dict, src_state_dict)
723
+
724
+ # add more
725
+ renamed_keys.extend(
726
+ [
727
+ (f"{src_prefix}.decoder_norm.weight", f"{dst_prefix}.layernorm.weight"),
728
+ (f"{src_prefix}.decoder_norm.bias", f"{dst_prefix}.layernorm.bias"),
729
+ ]
730
+ )
731
+
732
+ mlp_len = 3
733
+ for i in range(mlp_len):
734
+ renamed_keys.extend(
735
+ [
736
+ (
737
+ f"{src_prefix}.mask_embed.layers.{i}.weight",
738
+ f"{dst_prefix}.mask_predictor.mask_embedder.{i}.0.weight",
739
+ ),
740
+ (
741
+ f"{src_prefix}.mask_embed.layers.{i}.bias",
742
+ f"{dst_prefix}.mask_predictor.mask_embedder.{i}.0.bias",
743
+ ),
744
+ ]
745
+ )
746
+
747
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
748
+
749
+ def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
750
+ dst_prefix: str = "transformer_module.decoder.layers"
751
+ src_prefix: str = "sem_seg_head.predictor"
752
+ for i in range(self.config.decoder_layers - 1):
753
+ # read in weights + bias of input projection layer of self-attention
754
+ in_proj_weight = src_state_dict.pop(
755
+ f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight"
756
+ )
757
+ in_proj_bias = src_state_dict.pop(
758
+ f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias"
759
+ )
760
+ # next, add query, keys and values (in that order) to the state dict
761
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
762
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
763
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
764
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
765
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
766
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
767
+
768
+ def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
769
+ dst_prefix: str = "transformer_module"
770
+ src_prefix: str = "sem_seg_head.predictor"
771
+
772
+ self.replace_masked_attention_decoder(dst_state_dict, src_state_dict)
773
+
774
+ renamed_keys = [
775
+ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
776
+ (f"{src_prefix}.query_feat.weight", f"{dst_prefix}.queries_features.weight"),
777
+ (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"),
778
+ ]
779
+
780
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
781
+ self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)
782
+
783
+ def replace_universal_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
784
+ dst_prefix: str = ""
785
+ src_prefix: str = "sem_seg_head.predictor"
786
+
787
+ renamed_keys = [
788
+ (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"),
789
+ (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"),
790
+ ]
791
+
792
+ logger.info(f"Replacing keys {pformat(renamed_keys)}")
793
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
794
+
795
+ def convert(self, mask2former: Mask2FormerModel) -> Mask2FormerModel:
796
+ dst_state_dict = TrackedStateDict(mask2former.state_dict())
797
+ src_state_dict = self.original_model.state_dict()
798
+
799
+ self.replace_pixel_module(dst_state_dict, src_state_dict)
800
+ self.replace_transformer_module(dst_state_dict, src_state_dict)
801
+
802
+ logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}")
803
+ logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}")
804
+ logger.info("🙌 Done")
805
+
806
+ state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track.keys()}
807
+ mask2former.load_state_dict(state_dict)
808
+ return mask2former
809
+
810
+ def convert_universal_segmentation(
811
+ self, mask2former: Mask2FormerForUniversalSegmentation
812
+ ) -> Mask2FormerForUniversalSegmentation:
813
+ dst_state_dict = TrackedStateDict(mask2former.state_dict())
814
+ src_state_dict = self.original_model.state_dict()
815
+
816
+ self.replace_universal_segmentation_module(dst_state_dict, src_state_dict)
817
+
818
+ state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track.keys()}
819
+ mask2former.load_state_dict(state_dict)
820
+
821
+ return mask2former
822
+
823
+ @staticmethod
824
+ def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]:
825
+ checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl")
826
+
827
+ for checkpoint in checkpoints:
828
+ logger.info(f"💪 Converting {checkpoint.stem}")
829
+ # find associated config file
830
+
831
+ # dataset_name e.g 'coco'
832
+ dataset_name = checkpoint.parents[2].stem
833
+ if dataset_name == "ade":
834
+ dataset_name = dataset_name.replace("ade", "ade20k")
835
+
836
+ # task type e.g 'instance-segmentation'
837
+ segmentation_task = checkpoint.parents[1].stem
838
+
839
+ # config file corresponding to checkpoint
840
+ config_file_name = f"{checkpoint.parents[0].stem}.yaml"
841
+
842
+ config: Path = config_dir / dataset_name / segmentation_task / "swin" / config_file_name
843
+ yield config, checkpoint
844
+
845
+
846
+ def test(
847
+ original_model,
848
+ our_model: Mask2FormerForUniversalSegmentation,
849
+ image_processor: Mask2FormerImageProcessor,
850
+ tolerance: float,
851
+ ):
852
+ with torch.no_grad():
853
+ original_model = original_model.eval()
854
+ our_model = our_model.eval()
855
+
856
+ im = prepare_img()
857
+ x = image_processor(images=im, return_tensors="pt")["pixel_values"]
858
+
859
+ original_model_backbone_features = original_model.backbone(x.clone())
860
+ our_model_output: Mask2FormerModelOutput = our_model.model(x.clone(), output_hidden_states=True)
861
+
862
+ # Test backbone
863
+ for original_model_feature, our_model_feature in zip(
864
+ original_model_backbone_features.values(), our_model_output.encoder_hidden_states
865
+ ):
866
+ assert torch.allclose(
867
+ original_model_feature, our_model_feature, atol=tolerance
868
+ ), "The backbone features are not the same."
869
+
870
+ # Test pixel decoder
871
+ mask_features, _, multi_scale_features = original_model.sem_seg_head.pixel_decoder.forward_features(
872
+ original_model_backbone_features
873
+ )
874
+
875
+ for original_model_feature, our_model_feature in zip(
876
+ multi_scale_features, our_model_output.pixel_decoder_hidden_states
877
+ ):
878
+ assert torch.allclose(
879
+ original_model_feature, our_model_feature, atol=tolerance
880
+ ), "The pixel decoder feature are not the same"
881
+
882
+ # Let's test the full model
883
+ tr_complete = T.Compose(
884
+ [T.Resize((384, 384)), T.ToTensor()],
885
+ )
886
+ y = (tr_complete(im) * 255.0).to(torch.int).float()
887
+
888
+ # modify original Mask2Former code to return mask and class logits
889
+ original_class_logits, original_mask_logits = original_model([{"image": y.clone().squeeze(0)}])
890
+
891
+ our_model_out: Mask2FormerForUniversalSegmentationOutput = our_model(x.clone())
892
+ our_mask_logits = our_model_out.masks_queries_logits
893
+ our_class_logits = our_model_out.class_queries_logits
894
+
895
+ assert original_mask_logits.shape == our_mask_logits.shape, "Output masks shapes are not matching."
896
+ assert original_class_logits.shape == our_class_logits.shape, "Output class logits shapes are not matching."
897
+ assert torch.allclose(
898
+ original_class_logits, our_class_logits, atol=tolerance
899
+ ), "The class logits are not the same."
900
+ assert torch.allclose(
901
+ original_mask_logits, our_mask_logits, atol=tolerance
902
+ ), "The predicted masks are not the same."
903
+
904
+ logger.info("✅ Test passed!")
905
+
906
+
907
+ def get_model_name(checkpoint_file: Path):
908
+ # model_name_raw is something like maskformer2_swin_small_bs16_50ep
909
+ model_name_raw: str = checkpoint_file.parents[0].stem
910
+
911
+ # `segmentation_task_name` must be one of the following: `instance-segmentation`, `panoptic-segmentation`, `semantic-segmentation`
912
+ segmentation_task_name: str = checkpoint_file.parents[1].stem
913
+ if segmentation_task_name not in ["instance-segmentation", "panoptic-segmentation", "semantic-segmentation"]:
914
+ raise ValueError(
915
+ f"{segmentation_task_name} must be wrong since acceptable values are: instance-segmentation,"
916
+ " panoptic-segmentation, semantic-segmentation."
917
+ )
918
+
919
+ # dataset name must be one of the following: `coco`, `ade`, `cityscapes`, `mapillary-vistas`
920
+ dataset_name: str = checkpoint_file.parents[2].stem
921
+ if dataset_name not in ["coco", "ade", "cityscapes", "mapillary-vistas"]:
922
+ raise ValueError(
923
+ f"{dataset_name} must be wrong since we didn't find 'coco' or 'ade' or 'cityscapes' or 'mapillary-vistas'"
924
+ " in it "
925
+ )
926
+
927
+ backbone = "swin"
928
+ backbone_types = ["tiny", "small", "base_IN21k", "base", "large"]
929
+ backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0].replace("_", "-")
930
+
931
+ model_name = f"mask2former-{backbone}-{backbone_type}-{dataset_name}-{segmentation_task_name.split('-')[0]}"
932
+
933
+ return model_name
934
+
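+ # Illustrative worked example (hypothetical path): for a checkpoint at
+ # <checkpoints_dir>/coco/instance-segmentation/maskformer2_swin_small_bs16_50ep/model_final.pkl,
+ # `get_model_name` picks backbone_type "small" out of the parent directory name
+ # and returns "mask2former-swin-small-coco-instance".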
935
+
936
+ if __name__ == "__main__":
937
+ parser = ArgumentParser(
938
+ description="Command line to convert the original mask2formers (with swin backbone) to our implementations."
939
+ )
940
+
941
+ parser.add_argument(
942
+ "--checkpoints_dir",
943
+ type=Path,
944
+ help=(
945
+ "A directory containing the model's checkpoints. The directory has to have the following structure:"
946
+ " <DIR_NAME>/<DATASET_NAME>/<SEGMENTATION_TASK_NAME>/<CONFIG_NAME>.pkl"
947
+ ),
948
+ )
949
+ parser.add_argument(
950
+ "--configs_dir",
951
+ type=Path,
952
+ help=(
953
+ "A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
954
+ " structure: <DIR_NAME>/<DATASET_NAME>/<SEGMENTATION_TASK_NAME>/<CONFIG_NAME>.yaml"
955
+ ),
956
+ )
957
+ parser.add_argument(
958
+ "--mask2former_dir",
959
+ required=True,
960
+ type=Path,
961
+ help=(
962
+ "A path to Mask2Former's original implementation directory. You can download from here:"
963
+ " https://github.com/facebookresearch/Mask2Former"
964
+ ),
965
+ )
966
+
967
+ args = parser.parse_args()
968
+
969
+ checkpoints_dir: Path = args.checkpoints_dir
970
+ config_dir: Path = args.configs_dir
971
+ mask2former_dir: Path = args.mask2former_dir
972
+ # append the parent of the Mask2Former directory to sys.path
973
+ sys.path.append(str(mask2former_dir.parent))
974
+ # import original Mask2Former config and model from original source code repo
975
+ from Mask2Former.mask2former.config import add_maskformer2_config
976
+ from Mask2Former.mask2former.maskformer_model import MaskFormer as OriginalMask2Former
977
+
978
+ for config_file, checkpoint_file in OriginalMask2FormerCheckpointToOursConverter.using_dirs(
979
+ checkpoints_dir, config_dir
980
+ ):
981
+ model_name = get_model_name(checkpoint_file)
982
+ image_processor = OriginalMask2FormerConfigToImageProcessorConverter()(
983
+ setup_cfg(Args(config_file=config_file))
984
+ )
985
+ image_processor.size = {"height": 384, "width": 384}
986
+
987
+ original_config = setup_cfg(Args(config_file=config_file))
988
+ mask2former_kwargs = OriginalMask2Former.from_config(original_config)
989
+ original_model = OriginalMask2Former(**mask2former_kwargs).eval()
990
+
991
+ DetectionCheckpointer(original_model).load(str(checkpoint_file))
992
+
993
+ config: Mask2FormerConfig = OriginalMask2FormerConfigToOursConverter()(original_config)
994
+ mask2former = Mask2FormerModel(config=config).eval()
995
+
996
+ converter = OriginalMask2FormerCheckpointToOursConverter(original_model, config)
997
+ mask2former = converter.convert(mask2former)
998
+
999
+ mask2former_for_segmentation = Mask2FormerForUniversalSegmentation(config=config).eval()
1000
+ mask2former_for_segmentation.model = mask2former
1001
+
1002
+ mask2former_for_segmentation = converter.convert_universal_segmentation(mask2former_for_segmentation)
1003
+
1004
+ tolerance = 3e-1
1005
+ high_tolerance_models = [
1006
+ "mask2former-swin-base-IN21k-coco-instance",
1007
+ "mask2former-swin-base-coco-instance",
1008
+ "mask2former-swin-small-cityscapes-semantic",
1009
+ ]
1010
+
1011
+ if model_name in high_tolerance_models:
1012
+ tolerance = 3e-1
1013
+
1014
+ logger.info(f"🪄 Testing {model_name}...")
1015
+ test(original_model, mask2former_for_segmentation, image_processor, tolerance)
1016
+ logger.info(f"🪄 Pushing {model_name} to hub...")
1017
+
1018
+ image_processor.push_to_hub(model_name)
1019
+ mask2former_for_segmentation.push_to_hub(model_name)
venv/lib/python3.10/site-packages/transformers/models/mask2former/image_processing_mask2former.py ADDED
@@ -0,0 +1,1253 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Mask2Former."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
24
+ from ...image_transforms import (
25
+ PaddingMode,
26
+ get_resize_output_image_size,
27
+ pad,
28
+ rescale,
29
+ resize,
30
+ to_channel_dimension_format,
31
+ )
32
+ from ...image_utils import (
33
+ ChannelDimension,
34
+ ImageInput,
35
+ PILImageResampling,
36
+ get_image_size,
37
+ infer_channel_dimension_format,
38
+ is_batched,
39
+ is_scaled_image,
40
+ to_numpy_array,
41
+ valid_images,
42
+ validate_kwargs,
43
+ validate_preprocess_arguments,
44
+ )
45
+ from ...utils import (
46
+ IMAGENET_DEFAULT_MEAN,
47
+ IMAGENET_DEFAULT_STD,
48
+ TensorType,
49
+ is_torch_available,
50
+ is_torch_tensor,
51
+ logging,
52
+ )
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+
58
+ if is_torch_available():
59
+ import torch
60
+ from torch import nn
61
+
62
+
63
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
64
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
65
+ """
66
+ Return the maximum value across all indices of an iterable of values.
67
+ """
68
+ return [max(values_i) for values_i in zip(*values)]
69
+
70
+
71
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
72
+ def get_max_height_width(
73
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
74
+ ) -> List[int]:
75
+ """
76
+ Get the maximum height and width across all images in a batch.
77
+ """
78
+ if input_data_format is None:
79
+ input_data_format = infer_channel_dimension_format(images[0])
80
+
81
+ if input_data_format == ChannelDimension.FIRST:
82
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
83
+ elif input_data_format == ChannelDimension.LAST:
84
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
85
+ else:
86
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
87
+ return (max_height, max_width)
88
+
89
+
90
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
91
+ def make_pixel_mask(
92
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
93
+ ) -> np.ndarray:
94
+ """
95
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
96
+
97
+ Args:
98
+ image (`np.ndarray`):
99
+ Image to make the pixel mask for.
100
+ output_size (`Tuple[int, int]`):
101
+ Output size of the mask.
102
+ """
103
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
104
+ mask = np.zeros(output_size, dtype=np.int64)
105
+ mask[:input_height, :input_width] = 1
106
+ return mask
107
+
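+ # Illustrative worked example: for a 2x3 image padded to output_size=(3, 4),
+ # make_pixel_mask returns
+ # [[1, 1, 1, 0],
+ #  [1, 1, 1, 0],
+ #  [0, 0, 0, 0]]
+ # i.e. 1 over the original pixels and 0 over the padding.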
108
+
109
+ # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
110
+ def binary_mask_to_rle(mask):
111
+ """
112
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
113
+
114
+ Args:
115
+ mask (`torch.Tensor` or `numpy.array`):
116
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
117
+ segment_id or class_id.
118
+ Returns:
119
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
120
+ format.
121
+ """
122
+ if is_torch_tensor(mask):
123
+ mask = mask.numpy()
124
+
125
+ pixels = mask.flatten()
126
+ pixels = np.concatenate([[0], pixels, [0]])
127
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
128
+ runs[1::2] -= runs[::2]
129
+ return list(runs)
130
+
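+ # Illustrative worked example: for mask = np.array([[0, 1, 1], [0, 1, 0]]),
+ # the flattened pixels are [0, 1, 1, 0, 1, 0] and the returned RLE is
+ # [2, 2, 5, 1]: a run of 2 ones starting at (1-indexed) pixel 2 and a run of
+ # 1 one starting at pixel 5.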
131
+
132
+ # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
133
+ def convert_segmentation_to_rle(segmentation):
134
+ """
135
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
136
+
137
+ Args:
138
+ segmentation (`torch.Tensor` or `numpy.array`):
139
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
140
+ Returns:
141
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
142
+ """
143
+ segment_ids = torch.unique(segmentation)
144
+
145
+ run_length_encodings = []
146
+ for idx in segment_ids:
147
+ mask = torch.where(segmentation == idx, 1, 0)
148
+ rle = binary_mask_to_rle(mask)
149
+ run_length_encodings.append(rle)
150
+
151
+ return run_length_encodings
152
+
153
+
154
+ # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
155
+ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
156
+ """
157
+ Binarize the given masks using `object_mask_threshold`; it returns the associated values of `masks`, `scores` and
158
+ `labels`.
159
+
160
+ Args:
161
+ masks (`torch.Tensor`):
162
+ A tensor of shape `(num_queries, height, width)`.
163
+ scores (`torch.Tensor`):
164
+ A tensor of shape `(num_queries)`.
165
+ labels (`torch.Tensor`):
166
+ A tensor of shape `(num_queries)`.
167
+ object_mask_threshold (`float`):
168
+ A number between 0 and 1 used to binarize the masks.
169
+ Raises:
170
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
171
+ Returns:
172
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the regions
173
+ whose score is below `object_mask_threshold`.
174
+ """
175
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
176
+ raise ValueError("mask, scores and labels must have the same shape!")
177
+
178
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
179
+
180
+ return masks[to_keep], scores[to_keep], labels[to_keep]
181
+
182
+
183
+ # Copied from transformers.models.detr.image_processing_detr.check_segment_validity
184
+ def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
185
+ # Get the mask associated with the k class
186
+ mask_k = mask_labels == k
187
+ mask_k_area = mask_k.sum()
188
+
189
+ # Compute the area of all the stuff in query k
190
+ original_area = (mask_probs[k] >= mask_threshold).sum()
191
+ mask_exists = mask_k_area > 0 and original_area > 0
192
+
193
+ # Eliminate disconnected tiny segments
194
+ if mask_exists:
195
+ area_ratio = mask_k_area / original_area
196
+ if not area_ratio.item() > overlap_mask_area_threshold:
197
+ mask_exists = False
198
+
199
+ return mask_exists, mask_k
200
+
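+ # Illustrative numbers: if query k covers original_area=100 pixels at
+ # mask_threshold but only mask_k_area=80 of them survive the per-pixel argmax,
+ # then area_ratio=0.8 is not strictly greater than the default
+ # overlap_mask_area_threshold=0.8, so the segment is discarded.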
201
+
202
+ # Copied from transformers.models.detr.image_processing_detr.compute_segments
203
+ def compute_segments(
204
+ mask_probs,
205
+ pred_scores,
206
+ pred_labels,
207
+ mask_threshold: float = 0.5,
208
+ overlap_mask_area_threshold: float = 0.8,
209
+ label_ids_to_fuse: Optional[Set[int]] = None,
210
+ target_size: Tuple[int, int] = None,
211
+ ):
212
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
213
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
214
+
215
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
216
+ segments: List[Dict] = []
217
+
218
+ if target_size is not None:
219
+ mask_probs = nn.functional.interpolate(
220
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
221
+ )[0]
222
+
223
+ current_segment_id = 0
224
+
225
+ # Weigh each mask by its prediction score
226
+ mask_probs *= pred_scores.view(-1, 1, 1)
227
+ mask_labels = mask_probs.argmax(0) # [height, width]
228
+
229
+ # Keep track of instances of each class
230
+ stuff_memory_list: Dict[str, int] = {}
231
+ for k in range(pred_labels.shape[0]):
232
+ pred_class = pred_labels[k].item()
233
+ should_fuse = pred_class in label_ids_to_fuse
234
+
235
+ # Check if the mask exists and is large enough to be a segment
236
+ mask_exists, mask_k = check_segment_validity(
237
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
238
+ )
239
+
240
+ if mask_exists:
241
+ if pred_class in stuff_memory_list:
242
+ current_segment_id = stuff_memory_list[pred_class]
243
+ else:
244
+ current_segment_id += 1
245
+
246
+ # Add current object segment to final segmentation map
247
+ segmentation[mask_k] = current_segment_id
248
+ segment_score = round(pred_scores[k].item(), 6)
249
+ segments.append(
250
+ {
251
+ "id": current_segment_id,
252
+ "label_id": pred_class,
253
+ "was_fused": should_fuse,
254
+ "score": segment_score,
255
+ }
256
+ )
257
+ if should_fuse:
258
+ stuff_memory_list[pred_class] = current_segment_id
259
+
260
+ return segmentation, segments
261
+
262
+
263
+ # TODO: (Amy) Move to image_transforms
264
+ # Copied from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks
265
+ def convert_segmentation_map_to_binary_masks(
266
+ segmentation_map: "np.ndarray",
267
+ instance_id_to_semantic_id: Optional[Dict[int, int]] = None,
268
+ ignore_index: Optional[int] = None,
269
+ reduce_labels: bool = False,
270
+ ):
271
+ if reduce_labels and ignore_index is None:
272
+ raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.")
273
+
274
+ if reduce_labels:
275
+ segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1)
276
+
277
+ # Get unique ids (class or instance ids based on input)
278
+ all_labels = np.unique(segmentation_map)
279
+
280
+ # Drop background label if applicable
281
+ if ignore_index is not None:
282
+ all_labels = all_labels[all_labels != ignore_index]
283
+
284
+ # Generate a binary mask for each object instance
285
+ binary_masks = [(segmentation_map == i) for i in all_labels]
286
+ binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width)
287
+
288
+ # Convert instance ids to class ids
289
+ if instance_id_to_semantic_id is not None:
290
+ labels = np.zeros(all_labels.shape[0])
291
+
292
+ for label in all_labels:
293
+ class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label]
294
+ labels[all_labels == label] = class_id - 1 if reduce_labels else class_id
295
+ else:
296
+ labels = all_labels
297
+
298
+ return binary_masks.astype(np.float32), labels.astype(np.int64)
299
+
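+ # Illustrative worked example (no ignore_index, reduce_labels=False): for
+ # segmentation_map = np.array([[1, 1, 2], [1, 2, 2]]), all_labels is [1, 2],
+ # binary_masks has shape (2, 2, 3) with one mask per label, and the returned
+ # labels are [1, 2].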
300
+
301
+ # Copied from transformers.models.maskformer.image_processing_maskformer.get_maskformer_resize_output_image_size with maskformer->mask2former
302
+ def get_mask2former_resize_output_image_size(
303
+ image: np.ndarray,
304
+ size: Union[int, Tuple[int, int], List[int], Tuple[int]],
305
+ max_size: Optional[int] = None,
306
+ size_divisor: int = 0,
307
+ default_to_square: bool = True,
308
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
309
+ ) -> Tuple[int, int]:
310
+ """
311
+ Computes the output size given the desired size.
312
+
313
+ Args:
314
+ image (`np.ndarray`):
315
+ The input image.
316
+ size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):
317
+ The size of the output image.
318
+ max_size (`int`, *optional*):
319
+ The maximum size of the output image.
320
+ size_divisor (`int`, *optional*, defaults to 0):
321
+ If `size_divisor` is given, the output image size will be divisible by the number.
322
+ default_to_square (`bool`, *optional*, defaults to `True`):
323
+ Whether to default to square if no size is provided.
324
+ input_data_format (`ChannelDimension` or `str`, *optional*):
325
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
326
+
327
+ Returns:
328
+ `Tuple[int, int]`: The output size.
329
+ """
330
+ output_size = get_resize_output_image_size(
331
+ input_image=image,
332
+ size=size,
333
+ default_to_square=default_to_square,
334
+ max_size=max_size,
335
+ input_data_format=input_data_format,
336
+ )
337
+
338
+ if size_divisor > 0:
339
+ height, width = output_size
340
+ height = int(math.ceil(height / size_divisor) * size_divisor)
341
+ width = int(math.ceil(width / size_divisor) * size_divisor)
342
+ output_size = (height, width)
343
+
344
+ return output_size
345
+
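+ # Illustrative worked example: if get_resize_output_image_size yields
+ # (800, 1066) and size_divisor=32, the height stays 800 (already divisible)
+ # while the width is rounded up to ceil(1066 / 32) * 32 = 1088, giving
+ # (800, 1088).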
346
+
347
+ class Mask2FormerImageProcessor(BaseImageProcessor):
348
+ r"""
349
+ Constructs a Mask2Former image processor. The image processor can be used to prepare image(s) and optional targets
350
+ for the model.
351
+
352
+ This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
353
+ refer to this superclass for more information regarding those methods.
354
+
355
+ Args:
356
+ do_resize (`bool`, *optional*, defaults to `True`):
357
+ Whether to resize the input to a certain `size`.
358
+ size (`int`, *optional*, defaults to 800):
359
+ Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
360
+ sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
361
+ the image will be matched to this number, i.e., if `height > width`, the image will be rescaled to `(size *
362
+ height / width, size)`.
363
+ size_divisor (`int`, *optional*, defaults to 32):
364
+ Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in
365
+ Swin Transformer.
366
+ resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
367
+ An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
368
+ `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
369
+ `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
370
+ to `True`.
371
+ do_rescale (`bool`, *optional*, defaults to `True`):
372
+ Whether to rescale the input to a certain `scale`.
373
+ rescale_factor (`float`, *optional*, defaults to `1 / 255`):
374
+ Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
375
+ do_normalize (`bool`, *optional*, defaults to `True`):
376
+ Whether or not to normalize the input with mean and standard deviation.
377
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
378
+ The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
379
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
380
+ The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
381
+ ImageNet std.
382
+ ignore_index (`int`, *optional*):
383
+ Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
384
+ denoted with 0 (background) will be replaced with `ignore_index`.
385
+ reduce_labels (`bool`, *optional*, defaults to `False`):
386
+ Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
387
+ is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
388
+ The background label will be replaced by `ignore_index`.
389
+
390
+ """
391
+
392
+ model_input_names = ["pixel_values", "pixel_mask"]
393
+
394
+ def __init__(
395
+ self,
396
+ do_resize: bool = True,
397
+ size: Dict[str, int] = None,
398
+ size_divisor: int = 32,
399
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
400
+ do_rescale: bool = True,
401
+ rescale_factor: float = 1 / 255,
402
+ do_normalize: bool = True,
403
+ image_mean: Union[float, List[float]] = None,
404
+ image_std: Union[float, List[float]] = None,
405
+ ignore_index: Optional[int] = None,
406
+ reduce_labels: bool = False,
407
+ **kwargs,
408
+ ):
409
+ if "size_divisibility" in kwargs:
410
+ warnings.warn(
411
+ "The `size_divisibility` argument is deprecated and will be removed in v4.27. Please use "
412
+ "`size_divisor` instead.",
413
+ FutureWarning,
414
+ )
415
+ size_divisor = kwargs.pop("size_divisibility")
416
+ if "max_size" in kwargs:
417
+ warnings.warn(
418
+ "The `max_size` argument is deprecated and will be removed in v4.27. Please use size['longest_edge']"
419
+ " instead.",
420
+ FutureWarning,
421
+ )
422
+ # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst
423
+ # `size` can still be passed in as an int
424
+ self._max_size = kwargs.pop("max_size")
425
+ else:
426
+ self._max_size = 1333
427
+
428
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size}
429
+ size = get_size_dict(size, max_size=self._max_size, default_to_square=False)
430
+
431
+ super().__init__(**kwargs)
432
+ self.do_resize = do_resize
433
+ self.size = size
434
+ self.resample = resample
435
+ self.size_divisor = size_divisor
436
+ self.do_rescale = do_rescale
437
+ self.rescale_factor = rescale_factor
438
+ self.do_normalize = do_normalize
439
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
440
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
441
+ self.ignore_index = ignore_index
442
+ self.reduce_labels = reduce_labels
443
+ self._valid_processor_keys = [
444
+ "images",
445
+ "segmentation_maps",
446
+ "instance_id_to_semantic_id",
447
+ "do_resize",
448
+ "size",
449
+ "size_divisor",
450
+ "resample",
451
+ "do_rescale",
452
+ "rescale_factor",
453
+ "do_normalize",
454
+ "image_mean",
455
+ "image_std",
456
+ "ignore_index",
457
+ "reduce_labels",
458
+ "return_tensors",
459
+ "data_format",
460
+ "input_data_format",
461
+ ]
462
+
463
+ @classmethod
464
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
465
+ """
466
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
467
+ created using from_dict and kwargs e.g. `Mask2FormerImageProcessor.from_pretrained(checkpoint, max_size=800)`
468
+ """
469
+ image_processor_dict = image_processor_dict.copy()
470
+ if "max_size" in kwargs:
471
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
472
+ if "size_divisibility" in kwargs:
473
+ image_processor_dict["size_divisibility"] = kwargs.pop("size_divisibility")
474
+ return super().from_dict(image_processor_dict, **kwargs)
475
+
476
+ # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.resize with get_maskformer_resize_output_image_size->get_mask2former_resize_output_image_size
477
+ def resize(
478
+ self,
479
+ image: np.ndarray,
480
+ size: Dict[str, int],
481
+ size_divisor: int = 0,
482
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
483
+ data_format=None,
484
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
485
+ **kwargs,
486
+ ) -> np.ndarray:
487
+ """
488
+ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
489
+ int, smaller edge of the image will be matched to this number.
490
+
491
+ Args:
492
+ image (`np.ndarray`):
493
+ Image to resize.
494
+ size (`Dict[str, int]`):
495
+ The size of the output image.
496
+ size_divisor (`int`, *optional*, defaults to 0):
497
+ If `size_divisor` is given, the output image size will be divisible by the number.
498
+ resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):
499
+ Resampling filter to use when resizing the image.
500
+ data_format (`ChannelDimension` or `str`, *optional*):
501
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
502
+ image is used.
503
+ input_data_format (`ChannelDimension` or `str`, *optional*):
504
+ The channel dimension format of the input image. If not provided, it will be inferred.
505
+ """
506
+ if "max_size" in kwargs:
507
+ warnings.warn(
508
+ "The `max_size` parameter is deprecated and will be removed in v4.27. "
509
+ "Please specify in `size['longest_edge'] instead`.",
510
+ FutureWarning,
511
+ )
512
+ max_size = kwargs.pop("max_size")
513
+ else:
514
+ max_size = None
515
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
516
+ if "shortest_edge" in size and "longest_edge" in size:
517
+ size, max_size = size["shortest_edge"], size["longest_edge"]
518
+ elif "height" in size and "width" in size:
519
+ size = (size["height"], size["width"])
520
+ max_size = None
521
+ else:
522
+ raise ValueError(
523
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
524
+ f" {size.keys()}."
525
+ )
526
+ size = get_mask2former_resize_output_image_size(
527
+ image=image,
528
+ size=size,
529
+ max_size=max_size,
530
+ size_divisor=size_divisor,
531
+ default_to_square=False,
532
+ input_data_format=input_data_format,
533
+ )
534
+ image = resize(
535
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
536
+ )
537
+ return image
538
+
539
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
540
+ def rescale(
541
+ self,
542
+ image: np.ndarray,
543
+ rescale_factor: float,
544
+ data_format: Optional[Union[str, ChannelDimension]] = None,
545
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
546
+ ) -> np.ndarray:
547
+ """
548
+ Rescale the image by the given factor. image = image * rescale_factor.
549
+
550
+ Args:
551
+ image (`np.ndarray`):
552
+ Image to rescale.
553
+ rescale_factor (`float`):
554
+ The value to use for rescaling.
555
+ data_format (`str` or `ChannelDimension`, *optional*):
556
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
557
+ image is used. Can be one of:
558
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
559
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
560
+ input_data_format (`str` or `ChannelDimension`, *optional*):
561
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
562
+ one of:
563
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
564
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
565
+ """
566
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
567
+
568
+ # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks
569
+ def convert_segmentation_map_to_binary_masks(
570
+ self,
571
+ segmentation_map: "np.ndarray",
572
+ instance_id_to_semantic_id: Optional[Dict[int, int]] = None,
573
+ ignore_index: Optional[int] = None,
574
+ reduce_labels: bool = False,
575
+ ):
576
+ reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels
577
+ ignore_index = ignore_index if ignore_index is not None else self.ignore_index
578
+ return convert_segmentation_map_to_binary_masks(
579
+ segmentation_map=segmentation_map,
580
+ instance_id_to_semantic_id=instance_id_to_semantic_id,
581
+ ignore_index=ignore_index,
582
+ reduce_labels=reduce_labels,
583
+ )
584
+
585
+ def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature:
586
+ return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs)
587
+
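+ # Minimal usage sketch (the checkpoint name is shown for illustration only):
+ # >>> from transformers import Mask2FormerImageProcessor
+ # >>> processor = Mask2FormerImageProcessor.from_pretrained(
+ # ...     "facebook/mask2former-swin-small-coco-instance"
+ # ... )
+ # >>> inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
+ # >>> inputs["pixel_values"].shape  # (batch_size, num_channels, height, width)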
588
+ def _preprocess(
589
+ self,
590
+ image: ImageInput,
591
+ do_resize: bool = None,
592
+ size: Dict[str, int] = None,
593
+ size_divisor: int = None,
594
+ resample: PILImageResampling = None,
595
+ do_rescale: bool = None,
596
+ rescale_factor: float = None,
597
+ do_normalize: bool = None,
598
+ image_mean: Optional[Union[float, List[float]]] = None,
599
+ image_std: Optional[Union[float, List[float]]] = None,
600
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
601
+ ):
602
+ if do_resize:
603
+ image = self.resize(
604
+ image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format
605
+ )
606
+ if do_rescale:
607
+ image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)
608
+ if do_normalize:
609
+ image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
610
+ return image
611
+
612
+ def _preprocess_image(
613
+ self,
614
+ image: ImageInput,
615
+ do_resize: bool = None,
616
+ size: Dict[str, int] = None,
617
+ size_divisor: int = None,
618
+ resample: PILImageResampling = None,
619
+ do_rescale: bool = None,
620
+ rescale_factor: float = None,
621
+ do_normalize: bool = None,
622
+ image_mean: Optional[Union[float, List[float]]] = None,
623
+ image_std: Optional[Union[float, List[float]]] = None,
624
+ data_format: Optional[Union[str, ChannelDimension]] = None,
625
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
626
+ ) -> np.ndarray:
627
+ """Preprocesses a single image."""
628
+ # All transformations expect numpy arrays.
629
+ image = to_numpy_array(image)
630
+ if is_scaled_image(image) and do_rescale:
631
+ logger.warning_once(
632
+ "It looks like you are trying to rescale already rescaled images. If the input"
633
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
634
+ )
635
+ if input_data_format is None:
636
+ input_data_format = infer_channel_dimension_format(image)
637
+ image = self._preprocess(
638
+ image=image,
639
+ do_resize=do_resize,
640
+ size=size,
641
+ size_divisor=size_divisor,
642
+ resample=resample,
643
+ do_rescale=do_rescale,
644
+ rescale_factor=rescale_factor,
645
+ do_normalize=do_normalize,
646
+ image_mean=image_mean,
647
+ image_std=image_std,
648
+ input_data_format=input_data_format,
649
+ )
650
+ if data_format is not None:
651
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
652
+ return image
653
+
654
+ def _preprocess_mask(
655
+ self,
656
+ segmentation_map: ImageInput,
657
+ do_resize: bool = None,
658
+ size: Dict[str, int] = None,
659
+ size_divisor: int = 0,
660
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
661
+ ) -> np.ndarray:
662
+ """Preprocesses a single mask."""
663
+ segmentation_map = to_numpy_array(segmentation_map)
664
+ # Add channel dimension if missing - needed for certain transformations
665
+ if segmentation_map.ndim == 2:
666
+ added_channel_dim = True
667
+ segmentation_map = segmentation_map[None, ...]
668
+ input_data_format = ChannelDimension.FIRST
669
+ else:
670
+ added_channel_dim = False
671
+ if input_data_format is None:
672
+ input_data_format = infer_channel_dimension_format(segmentation_map)
673
+ # TODO: (Amy)
674
+ # Rework segmentation map processing to include reducing labels and resizing which doesn't
675
+ # drop segment IDs > 255.
676
+ segmentation_map = self._preprocess(
677
+ image=segmentation_map,
678
+ do_resize=do_resize,
679
+ resample=PILImageResampling.NEAREST,
680
+ size=size,
681
+ size_divisor=size_divisor,
682
+ do_rescale=False,
683
+ do_normalize=False,
684
+ input_data_format=input_data_format,
685
+ )
686
+ # Remove extra channel dimension if added for processing
687
+ if added_channel_dim:
688
+ segmentation_map = segmentation_map.squeeze(0)
689
+ return segmentation_map
690
+
691
+ def preprocess(
692
+ self,
693
+ images: ImageInput,
694
+ segmentation_maps: Optional[ImageInput] = None,
695
+ instance_id_to_semantic_id: Optional[Dict[int, int]] = None,
696
+ do_resize: Optional[bool] = None,
697
+ size: Optional[Dict[str, int]] = None,
698
+ size_divisor: Optional[int] = None,
699
+ resample: PILImageResampling = None,
700
+ do_rescale: Optional[bool] = None,
701
+ rescale_factor: Optional[float] = None,
702
+ do_normalize: Optional[bool] = None,
703
+ image_mean: Optional[Union[float, List[float]]] = None,
704
+ image_std: Optional[Union[float, List[float]]] = None,
705
+ ignore_index: Optional[int] = None,
706
+ reduce_labels: Optional[bool] = None,
707
+ return_tensors: Optional[Union[str, TensorType]] = None,
708
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
709
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
710
+ **kwargs,
711
+ ) -> BatchFeature:
712
+ if "pad_and_return_pixel_mask" in kwargs:
713
+ warnings.warn(
714
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version",
715
+ FutureWarning,
716
+ )
717
+
718
+ do_resize = do_resize if do_resize is not None else self.do_resize
719
+ size = size if size is not None else self.size
720
+ size = get_size_dict(size, default_to_square=False, max_size=self._max_size)
721
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
722
+ resample = resample if resample is not None else self.resample
723
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
724
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
725
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
726
+ image_mean = image_mean if image_mean is not None else self.image_mean
727
+ image_std = image_std if image_std is not None else self.image_std
728
+ ignore_index = ignore_index if ignore_index is not None else self.ignore_index
729
+ reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels
730
+
731
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
732
+
733
+ if not valid_images(images):
734
+ raise ValueError(
735
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
736
+ "torch.Tensor, tf.Tensor or jax.ndarray."
737
+ )
738
+
739
+ validate_preprocess_arguments(
740
+ do_rescale=do_rescale,
741
+ rescale_factor=rescale_factor,
742
+ do_normalize=do_normalize,
743
+ image_mean=image_mean,
744
+ image_std=image_std,
745
+ do_resize=do_resize,
746
+ size=size,
747
+ resample=resample,
748
+ )
749
+
750
+ if segmentation_maps is not None and not valid_images(segmentation_maps):
751
+ raise ValueError(
752
+ "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
753
+ "torch.Tensor, tf.Tensor or jax.ndarray."
754
+ )
755
+
756
+ if not is_batched(images):
757
+ images = [images]
758
+ segmentation_maps = [segmentation_maps] if segmentation_maps is not None else None
759
+
760
+ if segmentation_maps is not None and len(images) != len(segmentation_maps):
761
+ raise ValueError("Images and segmentation maps must have the same length.")
762
+
763
+ images = [
764
+ self._preprocess_image(
765
+ image,
766
+ do_resize=do_resize,
767
+ size=size,
768
+ size_divisor=size_divisor,
769
+ resample=resample,
770
+ do_rescale=do_rescale,
771
+ rescale_factor=rescale_factor,
772
+ do_normalize=do_normalize,
773
+ image_mean=image_mean,
774
+ image_std=image_std,
775
+ data_format=data_format,
776
+ input_data_format=input_data_format,
777
+ )
778
+ for image in images
779
+ ]
780
+
781
+ if segmentation_maps is not None:
782
+ segmentation_maps = [
783
+ self._preprocess_mask(
784
+ segmentation_map, do_resize, size, size_divisor, input_data_format=input_data_format
785
+ )
786
+ for segmentation_map in segmentation_maps
787
+ ]
788
+ encoded_inputs = self.encode_inputs(
789
+ images,
790
+ segmentation_maps,
791
+ instance_id_to_semantic_id,
792
+ ignore_index,
793
+ reduce_labels,
794
+ return_tensors,
795
+ input_data_format=input_data_format,
796
+ )
797
+ return encoded_inputs
798
+
799
+ # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
800
+ def _pad_image(
801
+ self,
802
+ image: np.ndarray,
803
+ output_size: Tuple[int, int],
804
+ constant_values: Union[float, Iterable[float]] = 0,
805
+ data_format: Optional[ChannelDimension] = None,
806
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
807
+ ) -> np.ndarray:
808
+ """
809
+ Pad an image with zeros to the given size.
810
+ """
811
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
812
+ output_height, output_width = output_size
813
+
814
+ pad_bottom = output_height - input_height
815
+ pad_right = output_width - input_width
816
+ padding = ((0, pad_bottom), (0, pad_right))
817
+ padded_image = pad(
818
+ image,
819
+ padding,
820
+ mode=PaddingMode.CONSTANT,
821
+ constant_values=constant_values,
822
+ data_format=data_format,
823
+ input_data_format=input_data_format,
824
+ )
825
+ return padded_image
826
+
827
+ # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad
828
+ def pad(
829
+ self,
830
+ images: List[np.ndarray],
831
+ constant_values: Union[float, Iterable[float]] = 0,
832
+ return_pixel_mask: bool = True,
833
+ return_tensors: Optional[Union[str, TensorType]] = None,
834
+ data_format: Optional[ChannelDimension] = None,
835
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
836
+ ) -> BatchFeature:
837
+ """
838
+ Pads a batch of images, on the bottom and right, with zeros up to the size of the largest height and width
839
+ in the batch and optionally returns their corresponding pixel mask.
840
+
841
+ Args:
842
+ images (`List[np.ndarray]`):
843
+ Images to pad.
844
+ constant_values (`float` or `Iterable[float]`, *optional*):
845
+ The value to use for the padding if `mode` is `"constant"`.
846
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
847
+ Whether to return a pixel mask.
848
+ return_tensors (`str` or `TensorType`, *optional*):
849
+ The type of tensors to return. Can be one of:
850
+ - Unset: Return a list of `np.ndarray`.
851
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
852
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
853
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
854
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
855
+ data_format (`str` or `ChannelDimension`, *optional*):
856
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
857
+ input_data_format (`ChannelDimension` or `str`, *optional*):
858
+ The channel dimension format of the input image. If not provided, it will be inferred.
859
+ """
860
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
861
+
862
+ padded_images = [
863
+ self._pad_image(
864
+ image,
865
+ pad_size,
866
+ constant_values=constant_values,
867
+ data_format=data_format,
868
+ input_data_format=input_data_format,
869
+ )
870
+ for image in images
871
+ ]
872
+ data = {"pixel_values": padded_images}
873
+
874
+ if return_pixel_mask:
875
+ masks = [
876
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
877
+ for image in images
878
+ ]
879
+ data["pixel_mask"] = masks
880
+
881
+ return BatchFeature(data=data, tensor_type=return_tensors)
882
+
883
+ def encode_inputs(
884
+ self,
885
+ pixel_values_list: List[ImageInput],
886
+ segmentation_maps: ImageInput = None,
887
+ instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None,
888
+ ignore_index: Optional[int] = None,
889
+ reduce_labels: bool = False,
890
+ return_tensors: Optional[Union[str, TensorType]] = None,
891
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
892
+ ):
893
+ """
894
+ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
895
+
896
+ Mask2Former addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
897
+ will be converted to lists of binary masks and their respective labels. For example, assuming
898
+ `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
899
+ [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
900
+ each mask.
901
+
902
+ Args:
903
+ pixel_values_list (`List[ImageInput]`):
904
+ List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
905
+ width)`.
906
+
907
+ segmentation_maps (`ImageInput`, *optional*):
908
+ The corresponding semantic segmentation maps with the pixel-wise annotations.
909
+
910
+ pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`):
911
+ Whether or not to pad images up to the largest image in a batch and create a pixel mask.
912
+
913
+ If left to the default, will return a pixel mask that is:
914
+
915
+ - 1 for pixels that are real (i.e. **not masked**),
916
+ - 0 for pixels that are padding (i.e. **masked**).
917
+
918
+ instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*):
919
+ A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
920
+ instance segmentation map where each pixel represents an instance id. Can be provided as a single
921
+ dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
922
+ instance ids in each image separately.
923
+
924
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
925
+ If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
926
+ objects.
927
+
928
+ input_data_format (`ChannelDimension` or `str`, *optional*):
929
+ The channel dimension format of the input image. If not provided, it will be inferred.
930
+
931
+ Returns:
932
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
933
+
934
+ - **pixel_values** -- Pixel values to be fed to a model.
935
+ - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if `pixel_mask` is in
936
+ `self.model_input_names`).
937
+ - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
938
+ (when `annotations` are provided).
939
+ - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
940
+ `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
941
+ `mask_labels[i][j]` is `class_labels[i][j]`.
942
+ """
943
+ ignore_index = self.ignore_index if ignore_index is None else ignore_index
944
+ reduce_labels = self.reduce_labels if reduce_labels is None else reduce_labels
945
+
946
+ pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
947
+
948
+ if input_data_format is None:
949
+ input_data_format = infer_channel_dimension_format(pixel_values_list[0])
950
+
951
+ encoded_inputs = self.pad(
952
+ pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format
953
+ )
954
+
955
+ if segmentation_maps is not None:
956
+ mask_labels = []
957
+ class_labels = []
958
+ pad_size = get_max_height_width(pixel_values_list)
959
+ # Convert to list of binary masks and labels
960
+ for idx, segmentation_map in enumerate(segmentation_maps):
961
+ segmentation_map = to_numpy_array(segmentation_map)
962
+ if isinstance(instance_id_to_semantic_id, list):
963
+ instance_id = instance_id_to_semantic_id[idx]
964
+ else:
965
+ instance_id = instance_id_to_semantic_id
966
+ # Use instance2class_id mapping per image
967
+ masks, classes = self.convert_segmentation_map_to_binary_masks(
968
+ segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels
969
+ )
970
+ # We add an axis to make them compatible with the transformations library
971
+ # this will be removed in the future
972
+ masks = [mask[None, ...] for mask in masks]
973
+ masks = [
974
+ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks
975
+ ]
976
+ masks = np.concatenate(masks, axis=0)
977
+ mask_labels.append(torch.from_numpy(masks))
978
+ class_labels.append(torch.from_numpy(classes))
979
+
980
+ # we cannot batch them since they don't share a common class size
981
+ encoded_inputs["mask_labels"] = mask_labels
982
+ encoded_inputs["class_labels"] = class_labels
983
+
984
+ return encoded_inputs
985
+
986
+ def post_process_semantic_segmentation(
987
+ self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None
988
+ ) -> "torch.Tensor":
989
+ """
990
+ Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports
991
+ PyTorch.
992
+
993
+ Args:
994
+ outputs ([`Mask2FormerForUniversalSegmentation`]):
995
+ Raw outputs of the model.
996
+ target_sizes (`List[Tuple[int, int]]`, *optional*):
997
+ List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
998
+ final size (height, width) of each prediction. If left to None, predictions will not be resized.
999
+ Returns:
1000
+ `List[torch.Tensor]`:
1001
+ A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
1002
+ corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
1003
+ `torch.Tensor` corresponds to a semantic class id.
1004
+ """
1005
+ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
1006
+ masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
1007
+
1008
+ # Scale back to preprocessed image size - (384, 384) for all models
1009
+ masks_queries_logits = torch.nn.functional.interpolate(
1010
+ masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
1011
+ )
1012
+
1013
+ # Remove the null class `[..., :-1]`
1014
+ masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
1015
+ masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1016
+
1017
+ # Semantic segmentation logits of shape (batch_size, num_classes, height, width)
1018
+ segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
1019
+ batch_size = class_queries_logits.shape[0]
1020
+
1021
+ # Resize logits and compute semantic segmentation maps
1022
+ if target_sizes is not None:
1023
+ if batch_size != len(target_sizes):
1024
+ raise ValueError(
1025
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1026
+ )
1027
+
1028
+ semantic_segmentation = []
1029
+ for idx in range(batch_size):
1030
+ resized_logits = torch.nn.functional.interpolate(
1031
+ segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
1032
+ )
1033
+ semantic_map = resized_logits[0].argmax(dim=0)
1034
+ semantic_segmentation.append(semantic_map)
1035
+ else:
1036
+ semantic_segmentation = segmentation.argmax(dim=1)
1037
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
1038
+
1039
+ return semantic_segmentation
1040
+
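+ # Minimal usage sketch (illustrative), continuing from a forward pass of
+ # Mask2FormerForUniversalSegmentation:
+ # >>> outputs = model(**inputs)
+ # >>> seg_maps = image_processor.post_process_semantic_segmentation(
+ # ...     outputs, target_sizes=[image.size[::-1]]  # PIL size is (width, height)
+ # ... )
+ # >>> seg_maps[0].shape  # (height, width); each entry is a semantic class id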
1041
+ def post_process_instance_segmentation(
1042
+ self,
1043
+ outputs,
1044
+ threshold: float = 0.5,
1045
+ mask_threshold: float = 0.5,
1046
+ overlap_mask_area_threshold: float = 0.8,
1047
+ target_sizes: Optional[List[Tuple[int, int]]] = None,
1048
+ return_coco_annotation: Optional[bool] = False,
1049
+ return_binary_maps: Optional[bool] = False,
1050
+ ) -> List[Dict]:
1051
+ """
1052
+ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions.
1053
+ Only supports PyTorch.
1054
+
1055
+ Args:
1056
+ outputs ([`Mask2FormerForUniversalSegmentation`]):
1057
+ Raw outputs of the model.
1058
+ threshold (`float`, *optional*, defaults to 0.5):
1059
+ The probability score threshold to keep predicted instance masks.
1060
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1061
+ Threshold to use when turning the predicted masks into binary values.
1062
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
1063
+ The overlap mask area threshold to merge or discard small disconnected parts within each binary
1064
+ instance mask.
1065
+ target_sizes (`List[Tuple]`, *optional*):
1066
+ List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
1067
+ final size (height, width) of each prediction. If left to None, predictions will not be resized.
1068
+ return_coco_annotation (`bool`, *optional*, defaults to `False`):
1069
+ If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
1070
+ return_binary_maps (`bool`, *optional*, defaults to `False`):
1071
+ If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
1072
+ (one per detected instance).
1073
+ Returns:
1074
+ `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
1075
+ - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or
1076
+ `List[List]` run-length encoding (RLE) of the segmentation map if `return_coco_annotation` is set to
1077
+ `True`. Set to `None` if no mask is found above `threshold`.
1078
+ - **segments_info** -- A dictionary that contains additional information on each segment.
1079
+ - **id** -- An integer representing the `segment_id`.
1080
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
1081
+ - **score** -- Prediction score of segment with `segment_id`.
1082
+ """
1083
+ if return_coco_annotation and return_binary_maps:
1084
+ raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.")
1085
+
1086
+ # [batch_size, num_queries, num_classes+1]
1087
+ class_queries_logits = outputs.class_queries_logits
1088
+ # [batch_size, num_queries, height, width]
1089
+ masks_queries_logits = outputs.masks_queries_logits
1090
+
1091
+ # Scale back to preprocessed image size - (384, 384) for all models
1092
+ masks_queries_logits = torch.nn.functional.interpolate(
1093
+ masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
1094
+ )
1095
+
1096
+ device = masks_queries_logits.device
1097
+ num_classes = class_queries_logits.shape[-1] - 1
1098
+ num_queries = class_queries_logits.shape[-2]
1099
+
1100
+ # Loop over items in batch size
1101
+ results: List[Dict[str, TensorType]] = []
1102
+
1103
+ for i in range(class_queries_logits.shape[0]):
1104
+ mask_pred = masks_queries_logits[i]
1105
+ mask_cls = class_queries_logits[i]
1106
+
1107
+ scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
1108
+ labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
1109
+
1110
+ scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
1111
+ labels_per_image = labels[topk_indices]
1112
+
1113
+ topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor")
1114
+ mask_pred = mask_pred[topk_indices]
1115
+ pred_masks = (mask_pred > 0).float()
1116
+
1117
+ # Calculate average mask prob
1118
+ mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
1119
+ pred_masks.flatten(1).sum(1) + 1e-6
1120
+ )
1121
+ pred_scores = scores_per_image * mask_scores_per_image
1122
+ pred_classes = labels_per_image
1123
+
1124
+ segmentation = torch.zeros((384, 384)) - 1
1125
+ if target_sizes is not None:
1126
+ segmentation = torch.zeros(target_sizes[i]) - 1
1127
+ pred_masks = torch.nn.functional.interpolate(
1128
+ pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest"
1129
+ )[0]
1130
+
1131
+ instance_maps, segments = [], []
1132
+ current_segment_id = 0
1133
+ for j in range(num_queries):
1134
+ score = pred_scores[j].item()
1135
+
1136
+ if not torch.all(pred_masks[j] == 0) and score >= threshold:
1137
+ segmentation[pred_masks[j] == 1] = current_segment_id
1138
+ segments.append(
1139
+ {
1140
+ "id": current_segment_id,
1141
+ "label_id": pred_classes[j].item(),
1142
+ "was_fused": False,
1143
+ "score": round(score, 6),
1144
+ }
1145
+ )
1146
+ current_segment_id += 1
1147
+ instance_maps.append(pred_masks[j])
1148
+
1149
+ # Return segmentation map in run-length encoding (RLE) format
1150
+ if return_coco_annotation:
1151
+ segmentation = convert_segmentation_to_rle(segmentation)
1152
+
1153
+ # Return a concatenated tensor of binary instance maps
1154
+ if return_binary_maps and len(instance_maps) != 0:
1155
+ segmentation = torch.stack(instance_maps, dim=0)
1156
+
1157
+ results.append({"segmentation": segmentation, "segments_info": segments})
1158
+ return results
1159
+
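A minimal usage sketch for `post_process_instance_segmentation` above, assuming a Mask2Former instance-segmentation checkpoint (the name below is illustrative) and a local image file:

```python
import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

checkpoint = "facebook/mask2former-swin-small-coco-instance"  # illustrative checkpoint
processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint)

image = Image.open("cats.jpg")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One dict per image; target_sizes maps predictions back to (height, width).
result = processor.post_process_instance_segmentation(
    outputs, threshold=0.5, target_sizes=[image.size[::-1]]
)[0]
print(result["segmentation"].shape)
for segment in result["segments_info"]:
    print(segment["id"], model.config.id2label[segment["label_id"]], segment["score"])
```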
1160
+ def post_process_panoptic_segmentation(
1161
+ self,
1162
+ outputs,
1163
+ threshold: float = 0.5,
1164
+ mask_threshold: float = 0.5,
1165
+ overlap_mask_area_threshold: float = 0.8,
1166
+ label_ids_to_fuse: Optional[Set[int]] = None,
1167
+ target_sizes: Optional[List[Tuple[int, int]]] = None,
1168
+ ) -> List[Dict]:
1169
+ """
1170
+ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation
1171
+ predictions. Only supports PyTorch.
1172
+
1173
+ Args:
1174
+ outputs ([`Mask2FormerForUniversalSegmentationOutput`]):
1175
+ The outputs from [`Mask2FormerForUniversalSegmentation`].
1176
+ threshold (`float`, *optional*, defaults to 0.5):
1177
+ The probability score threshold to keep predicted instance masks.
1178
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1179
+ Threshold to use when turning the predicted masks into binary values.
1180
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
1181
+ The overlap mask area threshold to merge or discard small disconnected parts within each binary
1182
+ instance mask.
1183
+ label_ids_to_fuse (`Set[int]`, *optional*):
1184
+ The labels in this set will have all their instances fused together. For instance, we could say
1185
+ there can only be one sky in an image, but several persons, so the label ID for sky would be in that
1186
+ set, but not the one for person.
1187
+ target_sizes (`List[Tuple]`, *optional*):
1188
+ List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested
1189
+ final size (height, width) of each prediction in batch. If left to None, predictions will not be
1190
+ resized.
1191
+
1192
+ Returns:
1193
+ `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
1194
+ - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
1195
+ to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
1196
+ to the corresponding `target_sizes` entry.
1197
+ - **segments_info** -- A dictionary that contains additional information on each segment.
1198
+ - **id** -- an integer representing the `segment_id`.
1199
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
1200
+ - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
1201
+ Multiple instances of the same class / label were fused and assigned a single `segment_id`.
1202
+ - **score** -- Prediction score of segment with `segment_id`.
1203
+ """
1204
+
1205
+ if label_ids_to_fuse is None:
1206
+ logger.warning("`label_ids_to_fuse` unset. No instance will be fused.")
1207
+ label_ids_to_fuse = set()
1208
+
1209
+ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
1210
+ masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
1211
+
1212
+ # Scale back to preprocessed image size - (384, 384) for all models
1213
+ masks_queries_logits = torch.nn.functional.interpolate(
1214
+ masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
1215
+ )
1216
+
1217
+ batch_size = class_queries_logits.shape[0]
1218
+ num_labels = class_queries_logits.shape[-1] - 1
1219
+
1220
+ mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1221
+
1222
+ # Predicted label and score of each query (batch_size, num_queries)
1223
+ pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
1224
+
1225
+ # Loop over items in the batch
1226
+ results: List[Dict[str, TensorType]] = []
1227
+
1228
+ for i in range(batch_size):
1229
+ mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
1230
+ mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
1231
+ )
1232
+
1233
+ # No mask found
1234
+ if mask_probs_item.shape[0] <= 0:
1235
+ height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
1236
+ segmentation = torch.zeros((height, width)) - 1
1237
+ results.append({"segmentation": segmentation, "segments_info": []})
1238
+ continue
1239
+
1240
+ # Get segmentation map and segment information of batch item
1241
+ target_size = target_sizes[i] if target_sizes is not None else None
1242
+ segmentation, segments = compute_segments(
1243
+ mask_probs=mask_probs_item,
1244
+ pred_scores=pred_scores_item,
1245
+ pred_labels=pred_labels_item,
1246
+ mask_threshold=mask_threshold,
1247
+ overlap_mask_area_threshold=overlap_mask_area_threshold,
1248
+ label_ids_to_fuse=label_ids_to_fuse,
1249
+ target_size=target_size,
1250
+ )
1251
+
1252
+ results.append({"segmentation": segmentation, "segments_info": segments})
1253
+ return results
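A matching sketch for the panoptic variant, reusing `processor`, `model`, `outputs`, and `image` from the instance-segmentation sketch above but assuming a panoptic checkpoint such as "facebook/mask2former-swin-base-coco-panoptic"; the `"sky-other-merged"` label name is checkpoint-specific and only an example:

```python
# All instances of labels in label_ids_to_fuse collapse into a single segment.
sky_id = model.config.label2id.get("sky-other-merged", 0)

result = processor.post_process_panoptic_segmentation(
    outputs,
    threshold=0.5,
    label_ids_to_fuse={sky_id},
    target_sizes=[image.size[::-1]],
)[0]
print(result["segmentation"].shape)
for segment in result["segments_info"]:
    print(segment["id"], segment["label_id"], segment["was_fused"], segment["score"])
```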
venv/lib/python3.10/site-packages/transformers/models/mask2former/modeling_mask2former.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__init__.py ADDED
@@ -0,0 +1,88 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mobilenet_v2": [
21
+ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "MobileNetV2Config",
23
+ "MobileNetV2OnnxConfig",
24
+ ],
25
+ }
26
+
27
+ try:
28
+ if not is_vision_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
34
+ _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
35
+
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_mobilenet_v2"] = [
44
+ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
45
+ "MobileNetV2ForImageClassification",
46
+ "MobileNetV2ForSemanticSegmentation",
47
+ "MobileNetV2Model",
48
+ "MobileNetV2PreTrainedModel",
49
+ "load_tf_weights_in_mobilenet_v2",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_mobilenet_v2 import (
55
+ MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
56
+ MobileNetV2Config,
57
+ MobileNetV2OnnxConfig,
58
+ )
59
+
60
+ try:
61
+ if not is_vision_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
67
+ from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
68
+
69
+ try:
70
+ if not is_torch_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .modeling_mobilenet_v2 import (
76
+ MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
77
+ MobileNetV2ForImageClassification,
78
+ MobileNetV2ForSemanticSegmentation,
79
+ MobileNetV2Model,
80
+ MobileNetV2PreTrainedModel,
81
+ load_tf_weights_in_mobilenet_v2,
82
+ )
83
+
84
+
85
+ else:
86
+ import sys
87
+
88
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
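Registering a `_LazyModule` like this keeps importing the package cheap: submodules are only imported on first attribute access. A small sketch, assuming `transformers` and `torch` are installed:

```python
import transformers.models.mobilenet_v2 as mv2

# Nothing heavy has been imported yet; attribute access triggers the real import.
config = mv2.MobileNetV2Config()      # pulls in configuration_mobilenet_v2
model = mv2.MobileNetV2Model(config)  # pulls in modeling_mobilenet_v2 (needs torch)
print(type(model).__name__)  # MobileNetV2Model
```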
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc ADDED
Binary file (6.52 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/feature_extraction_mobilenet_v2.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/image_processing_mobilenet_v2.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/modeling_mobilenet_v2.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py ADDED
@@ -0,0 +1,154 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileNetV2 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class MobileNetV2Config(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`MobileNetV2Model`]. It is used to instantiate a
36
+ MobileNetV2 model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the MobileNetV2
38
+ [google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of input channels.
46
+ image_size (`int`, *optional*, defaults to 224):
47
+ The size (resolution) of each image.
48
+ depth_multiplier (`float`, *optional*, defaults to 1.0):
49
+ Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
50
+ channels. This is sometimes also called "alpha" or "width multiplier".
51
+ depth_divisible_by (`int`, *optional*, defaults to 8):
52
+ The number of channels in each layer will always be a multiple of this number.
53
+ min_depth (`int`, *optional*, defaults to 8):
54
+ All layers will have at least this many channels.
55
+ expand_ratio (`float`, *optional*, defaults to 6.0):
56
+ The number of output channels of the first layer in each block is input channels times expansion ratio.
57
+ output_stride (`int`, *optional*, defaults to 32):
58
+ The ratio between the spatial resolution of the input and output feature maps. By default the model reduces
59
+ the input dimensions by a factor of 32. If `output_stride` is 8 or 16, the model uses dilated convolutions
60
+ on the depthwise layers instead of regular convolutions, so that the feature maps never become more than 8x
61
+ or 16x smaller than the input image.
62
+ first_layer_is_expansion (`bool`, *optional*, defaults to `True`):
63
+ True if the very first convolution layer is also the expansion layer for the first expansion block.
64
+ finegrained_output (`bool`, *optional*, defaults to `True`):
65
+ If true, the number of output channels in the final convolution layer will stay large (1280) even if
66
+ `depth_multiplier` is less than 1.
67
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
68
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
69
+ tf_padding (`bool`, *optional*, defaults to `True`):
70
+ Whether to use TensorFlow padding rules on the convolution layers.
71
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.8):
72
+ The dropout ratio for attached classifiers.
73
+ initializer_range (`float`, *optional*, defaults to 0.02):
74
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
75
+ layer_norm_eps (`float`, *optional*, defaults to 0.001):
76
+ The epsilon used by the layer normalization layers.
77
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
78
+ The index that is ignored by the loss function of the semantic segmentation model.
79
+
80
+ Example:
81
+
82
+ ```python
83
+ >>> from transformers import MobileNetV2Config, MobileNetV2Model
84
+
85
+ >>> # Initializing a "mobilenet_v2_1.0_224" style configuration
86
+ >>> configuration = MobileNetV2Config()
87
+
88
+ >>> # Initializing a model from the "mobilenet_v2_1.0_224" style configuration
89
+ >>> model = MobileNetV2Model(configuration)
90
+
91
+ >>> # Accessing the model configuration
92
+ >>> configuration = model.config
93
+ ```"""
94
+
95
+ model_type = "mobilenet_v2"
96
+
97
+ def __init__(
98
+ self,
99
+ num_channels=3,
100
+ image_size=224,
101
+ depth_multiplier=1.0,
102
+ depth_divisible_by=8,
103
+ min_depth=8,
104
+ expand_ratio=6.0,
105
+ output_stride=32,
106
+ first_layer_is_expansion=True,
107
+ finegrained_output=True,
108
+ hidden_act="relu6",
109
+ tf_padding=True,
110
+ classifier_dropout_prob=0.8,
111
+ initializer_range=0.02,
112
+ layer_norm_eps=0.001,
113
+ semantic_loss_ignore_index=255,
114
+ **kwargs,
115
+ ):
116
+ super().__init__(**kwargs)
117
+
118
+ if depth_multiplier <= 0:
119
+ raise ValueError("depth_multiplier must be greater than zero.")
120
+
121
+ self.num_channels = num_channels
122
+ self.image_size = image_size
123
+ self.depth_multiplier = depth_multiplier
124
+ self.depth_divisible_by = depth_divisible_by
125
+ self.min_depth = min_depth
126
+ self.expand_ratio = expand_ratio
127
+ self.output_stride = output_stride
128
+ self.first_layer_is_expansion = first_layer_is_expansion
129
+ self.finegrained_output = finegrained_output
130
+ self.hidden_act = hidden_act
131
+ self.tf_padding = tf_padding
132
+ self.classifier_dropout_prob = classifier_dropout_prob
133
+ self.initializer_range = initializer_range
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
136
+
137
+
138
+ class MobileNetV2OnnxConfig(OnnxConfig):
139
+ torch_onnx_minimum_version = version.parse("1.11")
140
+
141
+ @property
142
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
143
+ return OrderedDict([("pixel_values", {0: "batch"})])
144
+
145
+ @property
146
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
147
+ if self.task == "image-classification":
148
+ return OrderedDict([("logits", {0: "batch"})])
149
+ else:
150
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
151
+
152
+ @property
153
+ def atol_for_validation(self) -> float:
154
+ return 1e-4
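A hedged sketch of how `MobileNetV2OnnxConfig` plugs into the legacy `transformers.onnx` export helper (newer exports usually go through `optimum` instead):

```python
from pathlib import Path

from transformers import MobileNetV2Config, MobileNetV2ImageProcessor, MobileNetV2Model
from transformers.models.mobilenet_v2 import MobileNetV2OnnxConfig
from transformers.onnx import export

config = MobileNetV2Config()
model = MobileNetV2Model(config)
processor = MobileNetV2ImageProcessor()
onnx_config = MobileNetV2OnnxConfig(config)

# export() builds dummy inputs from the processor, traces the model, and
# writes the ONNX graph; it returns the matched input and output names.
onnx_inputs, onnx_outputs = export(
    processor, model, onnx_config, onnx_config.default_onnx_opset, Path("mobilenet_v2.onnx")
)
print(onnx_inputs, onnx_outputs)
```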
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,178 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MobileNetV2 checkpoints from the tensorflow/models library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import re
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ MobileNetV2Config,
30
+ MobileNetV2ForImageClassification,
31
+ MobileNetV2ForSemanticSegmentation,
32
+ MobileNetV2ImageProcessor,
33
+ load_tf_weights_in_mobilenet_v2,
34
+ )
35
+ from transformers.utils import logging
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ def get_mobilenet_v2_config(model_name):
43
+ config = MobileNetV2Config(layer_norm_eps=0.001)
44
+
45
+ if "quant" in model_name:
46
+ raise ValueError("Quantized models are not supported.")
47
+
48
+ matches = re.match(r"^.*mobilenet_v2_([^_]*)_([^_]*)$", model_name)
49
+ if matches:
50
+ config.depth_multiplier = float(matches[1])
51
+ config.image_size = int(matches[2])
52
+
53
+ if model_name.startswith("deeplabv3_"):
54
+ config.output_stride = 8
55
+ config.num_labels = 21
56
+ filename = "pascal-voc-id2label.json"
57
+ else:
58
+ # The TensorFlow version of MobileNetV2 predicts 1001 classes instead
59
+ # of the usual 1000. The first class (index 0) is "background".
60
+ config.num_labels = 1001
61
+ filename = "imagenet-1k-id2label.json"
62
+
63
+ repo_id = "huggingface/label-files"
64
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
65
+
66
+ if config.num_labels == 1001:
67
+ id2label = {int(k) + 1: v for k, v in id2label.items()}
68
+ id2label[0] = "background"
69
+ else:
70
+ id2label = {int(k): v for k, v in id2label.items()}
71
+
72
+ config.id2label = id2label
73
+ config.label2id = {v: k for k, v in id2label.items()}
74
+
75
+ return config
76
+
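As a quick standalone check of the name-parsing regex used above, the two capture groups recover the depth multiplier and the image size:

```python
import re

name = "deeplabv3_mobilenet_v2_1.0_513"  # example model name
matches = re.match(r"^.*mobilenet_v2_([^_]*)_([^_]*)$", name)
assert matches is not None
depth_multiplier, image_size = float(matches[1]), int(matches[2])
print(depth_multiplier, image_size)  # 1.0 513
```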
77
+
78
+ # We will verify our results on an image of cute cats
79
+ def prepare_img():
80
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
81
+ im = Image.open(requests.get(url, stream=True).raw)
82
+ return im
83
+
84
+
85
+ @torch.no_grad()
86
+ def convert_mobilenet_v2_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
87
+ """
88
+ Copy/paste/tweak model's weights to our MobileNetV2 structure.
89
+ """
90
+ config = get_mobilenet_v2_config(model_name)
91
+
92
+ # Load 🤗 model
93
+ if model_name.startswith("deeplabv3_"):
94
+ model = MobileNetV2ForSemanticSegmentation(config).eval()
95
+ else:
96
+ model = MobileNetV2ForImageClassification(config).eval()
97
+
98
+ # Load weights from TensorFlow checkpoint
99
+ load_tf_weights_in_mobilenet_v2(model, config, checkpoint_path)
100
+
101
+ # Check outputs on an image, prepared by MobileNetV2ImageProcessor
102
+ image_processor = MobileNetV2ImageProcessor(
103
+ crop_size={"width": config.image_size, "height": config.image_size},
104
+ size={"shortest_edge": config.image_size + 32},
105
+ )
106
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
107
+ outputs = model(**encoding)
108
+ logits = outputs.logits
109
+
110
+ if model_name.startswith("deeplabv3_"):
111
+ assert logits.shape == (1, 21, 65, 65)
112
+
113
+ if model_name == "deeplabv3_mobilenet_v2_1.0_513":
114
+ expected_logits = torch.tensor(
115
+ [
116
+ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
117
+ [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
118
+ [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
119
+ ]
120
+ )
121
+
122
+ else:
123
+ raise ValueError(f"Unknown model name: {model_name}")
124
+
125
+ assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
126
+ else:
127
+ assert logits.shape == (1, 1001)
128
+
129
+ if model_name == "mobilenet_v2_1.4_224":
130
+ expected_logits = torch.tensor([0.0181, -1.0015, 0.4688])
131
+ elif model_name == "mobilenet_v2_1.0_224":
132
+ expected_logits = torch.tensor([0.2445, -1.1993, 0.1905])
133
+ elif model_name == "mobilenet_v2_0.75_160":
134
+ expected_logits = torch.tensor([0.2482, 0.4136, 0.6669])
135
+ elif model_name == "mobilenet_v2_0.35_96":
136
+ expected_logits = torch.tensor([0.1451, -0.4624, 0.7192])
137
+ else:
138
+ expected_logits = None
139
+
140
+ if expected_logits is not None:
141
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
142
+
143
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
144
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
145
+ model.save_pretrained(pytorch_dump_folder_path)
146
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
147
+ image_processor.save_pretrained(pytorch_dump_folder_path)
148
+
149
+ if push_to_hub:
150
+ print("Pushing to the hub...")
151
+ repo_id = "google/" + model_name
152
+ image_processor.push_to_hub(repo_id)
153
+ model.push_to_hub(repo_id)
154
+
155
+
156
+ if __name__ == "__main__":
157
+ parser = argparse.ArgumentParser()
158
+ # Required parameters
159
+ parser.add_argument(
160
+ "--model_name",
161
+ default="mobilenet_v2_1.0_224",
162
+ type=str,
163
+ help="Name of the MobileNetV2 model you'd like to convert. Should be in the form 'mobilenet_v2_<depth>_<size>'.",
164
+ )
165
+ parser.add_argument(
166
+ "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
167
+ )
168
+ parser.add_argument(
169
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
170
+ )
171
+ parser.add_argument(
172
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
173
+ )
174
+
175
+ args = parser.parse_args()
176
+ convert_mobilenet_v2_checkpoint(
177
+ args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
178
+ )
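Equivalently, the conversion entry point can be driven directly from Python instead of the CLI; the paths here are hypothetical:

```python
convert_mobilenet_v2_checkpoint(
    model_name="mobilenet_v2_1.0_224",
    checkpoint_path="/tmp/mobilenet_v2_1.0_224.ckpt",
    pytorch_dump_folder_path="/tmp/mobilenet_v2_1.0_224_pt",
    push_to_hub=False,
)
```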
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for MobileNetV2."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class MobileNetV2FeatureExtractor(MobileNetV2ImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class MobileNetV2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use MobileNetV2ImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py ADDED
@@ -0,0 +1,373 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for MobileNetV2."""
16
+
17
+ from typing import Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_STANDARD_MEAN,
29
+ IMAGENET_STANDARD_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
42
+
43
+
44
+ if is_torch_available():
45
+ import torch
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ class MobileNetV2ImageProcessor(BaseImageProcessor):
52
+ r"""
53
+ Constructs a MobileNetV2 image processor.
54
+
55
+ Args:
56
+ do_resize (`bool`, *optional*, defaults to `True`):
57
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
58
+ `do_resize` in the `preprocess` method.
59
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
60
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
61
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
62
+ method.
63
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
64
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
65
+ `preprocess` method.
66
+ do_center_crop (`bool`, *optional*, defaults to `True`):
67
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
68
+ is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
69
+ `preprocess` method.
70
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
71
+ Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
72
+ Can be overridden by the `crop_size` parameter in the `preprocess` method.
73
+ do_rescale (`bool`, *optional*, defaults to `True`):
74
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
75
+ parameter in the `preprocess` method.
76
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
77
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
78
+ `preprocess` method.
79
+ do_normalize (`bool`, *optional*, defaults to `True`):
80
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
81
+ method.
82
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
83
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
84
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
85
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
86
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
87
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
88
+ """
89
+
90
+ model_input_names = ["pixel_values"]
91
+
92
+ def __init__(
93
+ self,
94
+ do_resize: bool = True,
95
+ size: Optional[Dict[str, int]] = None,
96
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
97
+ do_center_crop: bool = True,
98
+ crop_size: Dict[str, int] = None,
99
+ do_rescale: bool = True,
100
+ rescale_factor: Union[int, float] = 1 / 255,
101
+ do_normalize: bool = True,
102
+ image_mean: Optional[Union[float, List[float]]] = None,
103
+ image_std: Optional[Union[float, List[float]]] = None,
104
+ **kwargs,
105
+ ) -> None:
106
+ super().__init__(**kwargs)
107
+ size = size if size is not None else {"shortest_edge": 256}
108
+ size = get_size_dict(size, default_to_square=False)
109
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
110
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
111
+ self.do_resize = do_resize
112
+ self.size = size
113
+ self.resample = resample
114
+ self.do_center_crop = do_center_crop
115
+ self.crop_size = crop_size
116
+ self.do_rescale = do_rescale
117
+ self.rescale_factor = rescale_factor
118
+ self.do_normalize = do_normalize
119
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
120
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
121
+ self._valid_processor_keys = [
122
+ "images",
123
+ "do_resize",
124
+ "size",
125
+ "resample",
126
+ "do_center_crop",
127
+ "crop_size",
128
+ "do_rescale",
129
+ "rescale_factor",
130
+ "do_normalize",
131
+ "image_mean",
132
+ "image_std",
133
+ "return_tensors",
134
+ "data_format",
135
+ "input_data_format",
136
+ ]
137
+
138
+ # Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize
139
+ def resize(
140
+ self,
141
+ image: np.ndarray,
142
+ size: Dict[str, int],
143
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
144
+ data_format: Optional[Union[str, ChannelDimension]] = None,
145
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
146
+ **kwargs,
147
+ ) -> np.ndarray:
148
+ """
149
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
150
+ resized to keep the input aspect ratio.
151
+
152
+ Args:
153
+ image (`np.ndarray`):
154
+ Image to resize.
155
+ size (`Dict[str, int]`):
156
+ Size of the output image.
157
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
158
+ Resampling filter to use when resizing the image.
159
+ data_format (`str` or `ChannelDimension`, *optional*):
160
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
161
+ input_data_format (`ChannelDimension` or `str`, *optional*):
162
+ The channel dimension format of the input image. If not provided, it will be inferred.
163
+ """
164
+ default_to_square = True
165
+ if "shortest_edge" in size:
166
+ size = size["shortest_edge"]
167
+ default_to_square = False
168
+ elif "height" in size and "width" in size:
169
+ size = (size["height"], size["width"])
170
+ else:
171
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
172
+
173
+ output_size = get_resize_output_image_size(
174
+ image,
175
+ size=size,
176
+ default_to_square=default_to_square,
177
+ input_data_format=input_data_format,
178
+ )
179
+ return resize(
180
+ image,
181
+ size=output_size,
182
+ resample=resample,
183
+ data_format=data_format,
184
+ input_data_format=input_data_format,
185
+ **kwargs,
186
+ )
187
+
188
+ def preprocess(
189
+ self,
190
+ images: ImageInput,
191
+ do_resize: Optional[bool] = None,
192
+ size: Dict[str, int] = None,
193
+ resample: PILImageResampling = None,
194
+ do_center_crop: bool = None,
195
+ crop_size: Dict[str, int] = None,
196
+ do_rescale: Optional[bool] = None,
197
+ rescale_factor: Optional[float] = None,
198
+ do_normalize: Optional[bool] = None,
199
+ image_mean: Optional[Union[float, List[float]]] = None,
200
+ image_std: Optional[Union[float, List[float]]] = None,
201
+ return_tensors: Optional[Union[str, TensorType]] = None,
202
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
203
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
204
+ **kwargs,
205
+ ):
206
+ """
207
+ Preprocess an image or batch of images.
208
+
209
+ Args:
210
+ images (`ImageInput`):
211
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
212
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
213
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
214
+ Whether to resize the image.
215
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
216
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
217
+ the longest edge resized to keep the input aspect ratio.
218
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
219
+ `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
220
+ an effect if `do_resize` is set to `True`.
221
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
222
+ Whether to center crop the image.
223
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
224
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
225
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
226
+ Whether to rescale the image values to the range [0, 1].
227
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
228
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
229
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
230
+ Whether to normalize the image.
231
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
232
+ Image mean to use if `do_normalize` is set to `True`.
233
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
234
+ Image standard deviation to use if `do_normalize` is set to `True`.
235
+ return_tensors (`str` or `TensorType`, *optional*):
236
+ The type of tensors to return. Can be one of:
237
+ - Unset: Return a list of `np.ndarray`.
238
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
239
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
240
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
241
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
242
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
243
+ The channel dimension format for the output image. Can be one of:
244
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
245
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
246
+ - Unset: Use the channel dimension format of the input image.
247
+ input_data_format (`ChannelDimension` or `str`, *optional*):
248
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
249
+ from the input image. Can be one of:
250
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
251
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
252
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
253
+ """
254
+ do_resize = do_resize if do_resize is not None else self.do_resize
255
+ size = size if size is not None else self.size
256
+ size = get_size_dict(size, default_to_square=False)
257
+ resample = resample if resample is not None else self.resample
258
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
259
+ crop_size = crop_size if crop_size is not None else self.crop_size
260
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
261
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
262
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
263
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
264
+ image_mean = image_mean if image_mean is not None else self.image_mean
265
+ image_std = image_std if image_std is not None else self.image_std
266
+
267
+ images = make_list_of_images(images)
268
+
269
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
270
+
271
+ if not valid_images(images):
272
+ raise ValueError(
273
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
274
+ "torch.Tensor, tf.Tensor or jax.ndarray."
275
+ )
276
+ validate_preprocess_arguments(
277
+ do_rescale=do_rescale,
278
+ rescale_factor=rescale_factor,
279
+ do_normalize=do_normalize,
280
+ image_mean=image_mean,
281
+ image_std=image_std,
282
+ do_center_crop=do_center_crop,
283
+ crop_size=crop_size,
284
+ do_resize=do_resize,
285
+ size=size,
286
+ resample=resample,
287
+ )
288
+ # All transformations expect numpy arrays.
289
+ images = [to_numpy_array(image) for image in images]
290
+
291
+ if is_scaled_image(images[0]) and do_rescale:
292
+ logger.warning_once(
293
+ "It looks like you are trying to rescale already rescaled images. If the input"
294
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
295
+ )
296
+
297
+ if input_data_format is None:
298
+ # We assume that all images have the same channel dimension format.
299
+ input_data_format = infer_channel_dimension_format(images[0])
300
+
301
+ if do_resize:
302
+ images = [
303
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
304
+ for image in images
305
+ ]
306
+
307
+ if do_center_crop:
308
+ images = [
309
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
310
+ ]
311
+
312
+ if do_rescale:
313
+ images = [
314
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
315
+ for image in images
316
+ ]
317
+
318
+ if do_normalize:
319
+ images = [
320
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
321
+ for image in images
322
+ ]
323
+
324
+ images = [
325
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
326
+ ]
327
+
328
+ data = {"pixel_values": images}
329
+ return BatchFeature(data=data, tensor_type=return_tensors)
330
+
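The happy path through `preprocess` via the processor's `__call__`, as a short sketch (the image file is hypothetical):

```python
from PIL import Image
from transformers import MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor()
image = Image.open("cat.jpg")  # hypothetical local file; any RGB image works

# resize -> center crop -> rescale -> normalize, batched channels-first
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) with the default crop_size
```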
331
+ # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileNetV2
332
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
333
+ """
334
+ Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
335
+
336
+ Args:
337
+ outputs ([`MobileNetV2ForSemanticSegmentation`]):
338
+ Raw outputs of the model.
339
+ target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
340
+ List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
341
+ predictions will not be resized.
342
+
343
+ Returns:
344
+ semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
345
+ segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
346
+ specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
347
+ """
348
+ # TODO: add support for other frameworks
349
+ logits = outputs.logits
350
+
351
+ # Resize logits and compute semantic segmentation maps
352
+ if target_sizes is not None:
353
+ if len(logits) != len(target_sizes):
354
+ raise ValueError(
355
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
356
+ )
357
+
358
+ if is_torch_tensor(target_sizes):
359
+ target_sizes = target_sizes.numpy()
360
+
361
+ semantic_segmentation = []
362
+
363
+ for idx in range(len(logits)):
364
+ resized_logits = torch.nn.functional.interpolate(
365
+ logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
366
+ )
367
+ semantic_map = resized_logits[0].argmax(dim=0)
368
+ semantic_segmentation.append(semantic_map)
369
+ else:
370
+ semantic_segmentation = logits.argmax(dim=1)
371
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
372
+
373
+ return semantic_segmentation
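A hedged end-to-end sketch for the semantic post-processing above, assuming the DeepLabV3+MobileNetV2 checkpoint (name illustrative) and a local image:

```python
import torch
from PIL import Image
from transformers import MobileNetV2ForSemanticSegmentation, MobileNetV2ImageProcessor

checkpoint = "google/deeplabv3_mobilenet_v2_1.0_513"  # illustrative checkpoint
processor = MobileNetV2ImageProcessor.from_pretrained(checkpoint)
model = MobileNetV2ForSemanticSegmentation.from_pretrained(checkpoint)

image = Image.open("street.jpg")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One (height, width) class-id map per image, resized to the original size.
maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
print(maps[0].shape, maps[0].unique())
```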
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py ADDED
@@ -0,0 +1,862 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch MobileNetV2 model."""
16
+
17
+
18
+ from typing import Optional, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutputWithPoolingAndNoAttention,
27
+ ImageClassifierOutputWithNoAttention,
28
+ SemanticSegmenterOutput,
29
+ )
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import (
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_mobilenet_v2 import MobileNetV2Config
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ # General docstring
45
+ _CONFIG_FOR_DOC = "MobileNetV2Config"
46
+
47
+ # Base docstring
48
+ _CHECKPOINT_FOR_DOC = "google/mobilenet_v2_1.0_224"
49
+ _EXPECTED_OUTPUT_SHAPE = [1, 1280, 7, 7]
50
+
51
+ # Image classification docstring
52
+ _IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v2_1.0_224"
53
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
54
+
55
+
56
+ from ..deprecated._archive_maps import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ def _build_tf_to_pytorch_map(model, config, tf_weights=None):
60
+ """
61
+ A map of modules from TF to PyTorch.
62
+ """
63
+
64
+ tf_to_pt_map = {}
65
+
66
+ if isinstance(model, (MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)):
67
+ backbone = model.mobilenet_v2
68
+ else:
69
+ backbone = model
70
+
71
+ # Use the EMA weights if available
72
+ def ema(x):
73
+ return x + "/ExponentialMovingAverage" if x + "/ExponentialMovingAverage" in tf_weights else x
74
+
75
+ prefix = "MobilenetV2/Conv/"
76
+ tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.first_conv.convolution.weight
77
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.first_conv.normalization.bias
78
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.first_conv.normalization.weight
79
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.first_conv.normalization.running_mean
80
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.first_conv.normalization.running_var
81
+
82
+ prefix = "MobilenetV2/expanded_conv/depthwise/"
83
+ tf_to_pt_map[ema(prefix + "depthwise_weights")] = backbone.conv_stem.conv_3x3.convolution.weight
84
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.conv_3x3.normalization.bias
85
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.conv_3x3.normalization.weight
86
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.conv_3x3.normalization.running_mean
87
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.conv_3x3.normalization.running_var
88
+
89
+ prefix = "MobilenetV2/expanded_conv/project/"
90
+ tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.reduce_1x1.convolution.weight
91
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.reduce_1x1.normalization.bias
92
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.reduce_1x1.normalization.weight
93
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.reduce_1x1.normalization.running_mean
94
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.reduce_1x1.normalization.running_var
95
+
96
+ for i in range(16):
97
+ tf_index = i + 1
98
+ pt_index = i
99
+ pointer = backbone.layer[pt_index]
100
+
101
+ prefix = f"MobilenetV2/expanded_conv_{tf_index}/expand/"
102
+ tf_to_pt_map[ema(prefix + "weights")] = pointer.expand_1x1.convolution.weight
103
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.expand_1x1.normalization.bias
104
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.expand_1x1.normalization.weight
105
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.expand_1x1.normalization.running_mean
106
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.expand_1x1.normalization.running_var
107
+
108
+ prefix = f"MobilenetV2/expanded_conv_{tf_index}/depthwise/"
109
+ tf_to_pt_map[ema(prefix + "depthwise_weights")] = pointer.conv_3x3.convolution.weight
110
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.conv_3x3.normalization.bias
111
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.conv_3x3.normalization.weight
112
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.conv_3x3.normalization.running_mean
113
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.conv_3x3.normalization.running_var
114
+
115
+ prefix = f"MobilenetV2/expanded_conv_{tf_index}/project/"
116
+ tf_to_pt_map[ema(prefix + "weights")] = pointer.reduce_1x1.convolution.weight
117
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.reduce_1x1.normalization.bias
118
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.reduce_1x1.normalization.weight
119
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.reduce_1x1.normalization.running_mean
120
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.reduce_1x1.normalization.running_var
121
+
122
+ prefix = "MobilenetV2/Conv_1/"
123
+ tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_1x1.convolution.weight
124
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_1x1.normalization.bias
125
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_1x1.normalization.weight
126
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_1x1.normalization.running_mean
127
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_1x1.normalization.running_var
128
+
129
+ if isinstance(model, MobileNetV2ForImageClassification):
130
+ prefix = "MobilenetV2/Logits/Conv2d_1c_1x1/"
131
+ tf_to_pt_map[ema(prefix + "weights")] = model.classifier.weight
132
+ tf_to_pt_map[ema(prefix + "biases")] = model.classifier.bias
133
+
134
+ if isinstance(model, MobileNetV2ForSemanticSegmentation):
135
+ prefix = "image_pooling/"
136
+ tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_pool.convolution.weight
137
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_pool.normalization.bias
138
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_pool.normalization.weight
139
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_pool.normalization.running_mean
140
+ tf_to_pt_map[
141
+ prefix + "BatchNorm/moving_variance"
142
+ ] = model.segmentation_head.conv_pool.normalization.running_var
143
+
144
+ prefix = "aspp0/"
145
+ tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_aspp.convolution.weight
146
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_aspp.normalization.bias
147
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_aspp.normalization.weight
148
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_aspp.normalization.running_mean
149
+ tf_to_pt_map[
150
+ prefix + "BatchNorm/moving_variance"
151
+ ] = model.segmentation_head.conv_aspp.normalization.running_var
152
+
153
+ prefix = "concat_projection/"
154
+ tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_projection.convolution.weight
155
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_projection.normalization.bias
156
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_projection.normalization.weight
157
+ tf_to_pt_map[
158
+ prefix + "BatchNorm/moving_mean"
159
+ ] = model.segmentation_head.conv_projection.normalization.running_mean
160
+ tf_to_pt_map[
161
+ prefix + "BatchNorm/moving_variance"
162
+ ] = model.segmentation_head.conv_projection.normalization.running_var
163
+
164
+ prefix = "logits/semantic/"
165
+ tf_to_pt_map[ema(prefix + "weights")] = model.segmentation_head.classifier.convolution.weight
166
+ tf_to_pt_map[ema(prefix + "biases")] = model.segmentation_head.classifier.convolution.bias
167
+
168
+ return tf_to_pt_map
169
+
170
+
171
+ def load_tf_weights_in_mobilenet_v2(model, config, tf_checkpoint_path):
172
+ """Load TensorFlow checkpoints in a PyTorch model."""
173
+ try:
174
+ import numpy as np
175
+ import tensorflow as tf
176
+ except ImportError:
177
+ logger.error(
178
+ "Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
179
+ "https://www.tensorflow.org/install/ for installation instructions."
180
+ )
181
+ raise
182
+
183
+ # Load weights from TF model
184
+ init_vars = tf.train.list_variables(tf_checkpoint_path)
185
+ tf_weights = {}
186
+ for name, shape in init_vars:
187
+ logger.info(f"Loading TF weight {name} with shape {shape}")
188
+ array = tf.train.load_variable(tf_checkpoint_path, name)
189
+ tf_weights[name] = array
190
+
191
+ # Build TF to PyTorch weights loading map
192
+ tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
193
+
194
+ for name, pointer in tf_to_pt_map.items():
195
+ logger.info(f"Importing {name}")
196
+ if name not in tf_weights:
197
+ logger.info(f"{name} not in tf pre-trained weights, skipping")
198
+ continue
199
+
200
+ array = tf_weights[name]
201
+
202
+ if "depthwise_weights" in name:
203
+ logger.info("Transposing depthwise")
204
+ array = np.transpose(array, (2, 3, 0, 1))
205
+ elif "weights" in name:
206
+ logger.info("Transposing")
207
+ if len(pointer.shape) == 2: # copying into linear layer
208
+ array = array.squeeze().transpose()
209
+ else:
210
+ array = np.transpose(array, (3, 2, 0, 1))
211
+
212
+ if pointer.shape != array.shape:
213
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
214
+
215
+ logger.info(f"Initialize PyTorch weight {name} {array.shape}")
216
+ pointer.data = torch.from_numpy(array)
217
+
218
+ tf_weights.pop(name, None)
219
+ tf_weights.pop(name + "/RMSProp", None)
220
+ tf_weights.pop(name + "/RMSProp_1", None)
221
+ tf_weights.pop(name + "/ExponentialMovingAverage", None)
222
+ tf_weights.pop(name + "/Momentum", None)
223
+
224
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
225
+ return model
226
+
227
+
228
+ def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
+     """
+     Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
+     original TensorFlow repo. It can be seen here:
+     https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+     """
+     if min_value is None:
+         min_value = divisor
+     new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
+     # Make sure that rounding down does not reduce the value by more than 10%.
+     if new_value < 0.9 * value:
+         new_value += divisor
+     return int(new_value)
+
+
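To make the rounding concrete, here are a few values that follow directly from the definition above (illustrative assertions, not part of the file):

```python
from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import make_divisible

assert make_divisible(61) == 64  # rounds 61 up to the nearest multiple of 8
assert make_divisible(50) == 48  # rounds 50 down; 48 >= 0.9 * 50, so no correction is needed
assert make_divisible(22, divisor=16) == 32  # 16 < 0.9 * 22, so the 10% guard adds one divisor
```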
+ def apply_depth_multiplier(config: MobileNetV2Config, channels: int) -> int:
+     return make_divisible(int(round(channels * config.depth_multiplier)), config.depth_divisible_by, config.min_depth)
+
+
+ def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
+     """
+     Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at:
+     https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2
+     """
+     in_height = int(features.shape[-2])
+     in_width = int(features.shape[-1])
+     stride_height, stride_width = conv_layer.stride
+     kernel_height, kernel_width = conv_layer.kernel_size
+     dilation_height, dilation_width = conv_layer.dilation
+
+     if in_height % stride_height == 0:
+         pad_along_height = max(kernel_height - stride_height, 0)
+     else:
+         pad_along_height = max(kernel_height - (in_height % stride_height), 0)
+
+     if in_width % stride_width == 0:
+         pad_along_width = max(kernel_width - stride_width, 0)
+     else:
+         pad_along_width = max(kernel_width - (in_width % stride_width), 0)
+
+     pad_left = pad_along_width // 2
+     pad_right = pad_along_width - pad_left
+     pad_top = pad_along_height // 2
+     pad_bottom = pad_along_height - pad_top
+
+     padding = (
+         pad_left * dilation_width,
+         pad_right * dilation_width,
+         pad_top * dilation_height,
+         pad_bottom * dilation_height,
+     )
+     return nn.functional.pad(features, padding, "constant", 0.0)
+
+
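A small sanity check of the SAME-padding arithmetic above, assuming `apply_tf_padding` is imported from this module:

```python
import torch
from torch import nn
from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import apply_tf_padding

conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=0)
x = torch.randn(1, 3, 224, 224)
# 224 % 2 == 0, so pad_along = max(3 - 2, 0) = 1 on each axis -> (left, right, top, bottom) = (0, 1, 0, 1)
padded = apply_tf_padding(x, conv)
print(padded.shape)        # torch.Size([1, 3, 225, 225])
print(conv(padded).shape)  # torch.Size([1, 8, 112, 112]), i.e. ceil(224 / 2), matching TF "SAME"
```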
+ class MobileNetV2ConvLayer(nn.Module):
+     def __init__(
+         self,
+         config: MobileNetV2Config,
+         in_channels: int,
+         out_channels: int,
+         kernel_size: int,
+         stride: int = 1,
+         groups: int = 1,
+         bias: bool = False,
+         dilation: int = 1,
+         use_normalization: bool = True,
+         use_activation: Union[bool, str] = True,
+         layer_norm_eps: Optional[float] = None,
+     ) -> None:
+         super().__init__()
+         self.config = config
+
+         if in_channels % groups != 0:
+             raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
+         if out_channels % groups != 0:
+             raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
+
+         padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) * dilation
+
+         self.convolution = nn.Conv2d(
+             in_channels=in_channels,
+             out_channels=out_channels,
+             kernel_size=kernel_size,
+             stride=stride,
+             padding=padding,
+             dilation=dilation,
+             groups=groups,
+             bias=bias,
+             padding_mode="zeros",
+         )
+
+         if use_normalization:
+             self.normalization = nn.BatchNorm2d(
+                 num_features=out_channels,
+                 eps=config.layer_norm_eps if layer_norm_eps is None else layer_norm_eps,
+                 momentum=0.997,
+                 affine=True,
+                 track_running_stats=True,
+             )
+         else:
+             self.normalization = None
+
+         if use_activation:
+             if isinstance(use_activation, str):
+                 self.activation = ACT2FN[use_activation]
+             elif isinstance(config.hidden_act, str):
+                 self.activation = ACT2FN[config.hidden_act]
+             else:
+                 self.activation = config.hidden_act
+         else:
+             self.activation = None
+
+     def forward(self, features: torch.Tensor) -> torch.Tensor:
+         if self.config.tf_padding:
+             features = apply_tf_padding(features, self.convolution)
+         features = self.convolution(features)
+         if self.normalization is not None:
+             features = self.normalization(features)
+         if self.activation is not None:
+             features = self.activation(features)
+         return features
+
+
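A quick illustration of the layer's convolution -> batch norm -> activation pipeline, assuming the config's default `relu6` activation and TF-style padding:

```python
import torch
from transformers import MobileNetV2Config
from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MobileNetV2ConvLayer

config = MobileNetV2Config()
layer = MobileNetV2ConvLayer(config, in_channels=3, out_channels=32, kernel_size=3, stride=2)
out = layer(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 32, 112, 112]) thanks to the "SAME" padding applied in forward()
```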
+ class MobileNetV2InvertedResidual(nn.Module):
+     def __init__(
+         self, config: MobileNetV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
+     ) -> None:
+         super().__init__()
+
+         expanded_channels = make_divisible(
+             int(round(in_channels * config.expand_ratio)), config.depth_divisible_by, config.min_depth
+         )
+
+         if stride not in [1, 2]:
+             raise ValueError(f"Invalid stride {stride}.")
+
+         self.use_residual = (stride == 1) and (in_channels == out_channels)
+
+         self.expand_1x1 = MobileNetV2ConvLayer(
+             config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
+         )
+
+         self.conv_3x3 = MobileNetV2ConvLayer(
+             config,
+             in_channels=expanded_channels,
+             out_channels=expanded_channels,
+             kernel_size=3,
+             stride=stride,
+             groups=expanded_channels,
+             dilation=dilation,
+         )
+
+         self.reduce_1x1 = MobileNetV2ConvLayer(
+             config,
+             in_channels=expanded_channels,
+             out_channels=out_channels,
+             kernel_size=1,
+             use_activation=False,
+         )
+
+     def forward(self, features: torch.Tensor) -> torch.Tensor:
+         residual = features
+
+         features = self.expand_1x1(features)
+         features = self.conv_3x3(features)
+         features = self.reduce_1x1(features)
+
+         return residual + features if self.use_residual else features
+
+
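The residual condition is easy to verify directly; a sketch assuming the block and config classes are imported as above:

```python
import torch
from transformers import MobileNetV2Config
from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MobileNetV2InvertedResidual

config = MobileNetV2Config()
block = MobileNetV2InvertedResidual(config, in_channels=24, out_channels=24, stride=1)
assert block.use_residual  # stride 1 and matching channels -> the skip connection is used
y = block(torch.randn(1, 24, 56, 56))  # expand 1x1 -> depthwise 3x3 -> project 1x1
print(y.shape)  # torch.Size([1, 24, 56, 56])
```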
+ class MobileNetV2Stem(nn.Module):
+     def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None:
+         super().__init__()
+
+         # The very first layer is a regular 3x3 convolution with stride 2 that expands to 32 channels.
+         # All other expansion layers use the expansion factor to compute the number of output channels.
+         self.first_conv = MobileNetV2ConvLayer(
+             config,
+             in_channels=in_channels,
+             out_channels=expanded_channels,
+             kernel_size=3,
+             stride=2,
+         )
+
+         if config.first_layer_is_expansion:
+             self.expand_1x1 = None
+         else:
+             self.expand_1x1 = MobileNetV2ConvLayer(
+                 config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=1
+             )
+
+         self.conv_3x3 = MobileNetV2ConvLayer(
+             config,
+             in_channels=expanded_channels,
+             out_channels=expanded_channels,
+             kernel_size=3,
+             stride=1,
+             groups=expanded_channels,
+         )
+
+         self.reduce_1x1 = MobileNetV2ConvLayer(
+             config,
+             in_channels=expanded_channels,
+             out_channels=out_channels,
+             kernel_size=1,
+             use_activation=False,
+         )
+
+     def forward(self, features: torch.Tensor) -> torch.Tensor:
+         features = self.first_conv(features)
+         if self.expand_1x1 is not None:
+             features = self.expand_1x1(features)
+         features = self.conv_3x3(features)
+         features = self.reduce_1x1(features)
+         return features
+
+
+ class MobileNetV2PreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = MobileNetV2Config
+     load_tf_weights = load_tf_weights_in_mobilenet_v2
+     base_model_prefix = "mobilenet_v2"
+     main_input_name = "pixel_values"
+     supports_gradient_checkpointing = False
+
+     def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
+         """Initialize the weights"""
+         if isinstance(module, (nn.Linear, nn.Conv2d)):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.BatchNorm2d):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+
+ MOBILENET_V2_START_DOCSTRING = r"""
+     This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+     as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+     behavior.
+
+     Parameters:
+         config ([`MobileNetV2Config`]): Model configuration class with all the parameters of the model.
+             Initializing with a config file does not load the weights associated with the model, only the
+             configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ MOBILENET_V2_INPUTS_DOCSTRING = r"""
+     Args:
+         pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+             Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+             [`MobileNetV2ImageProcessor.__call__`] for details.
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+             more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+ @add_start_docstrings(
+     "The bare MobileNetV2 model outputting raw hidden-states without any specific head on top.",
+     MOBILENET_V2_START_DOCSTRING,
+ )
+ class MobileNetV2Model(MobileNetV2PreTrainedModel):
+     def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool = True):
+         super().__init__(config)
+         self.config = config
+
+         # Output channels for the projection layers
+         channels = [16, 24, 24, 32, 32, 32, 64, 64, 64, 64, 96, 96, 96, 160, 160, 160, 320]
+         channels = [apply_depth_multiplier(config, x) for x in channels]
+
+         # Strides for the depthwise layers
+         strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
+
+         self.conv_stem = MobileNetV2Stem(
+             config,
+             in_channels=config.num_channels,
+             expanded_channels=apply_depth_multiplier(config, 32),
+             out_channels=channels[0],
+         )
+
+         current_stride = 2  # first conv layer has stride 2
+         dilation = 1
+
+         self.layer = nn.ModuleList()
+         for i in range(16):
+             # Keep making the feature maps smaller or use dilated convolution?
+             if current_stride == config.output_stride:
+                 layer_stride = 1
+                 layer_dilation = dilation
+                 dilation *= strides[i]  # larger dilation starts in next block
+             else:
+                 layer_stride = strides[i]
+                 layer_dilation = 1
+                 current_stride *= layer_stride
+
+             self.layer.append(
+                 MobileNetV2InvertedResidual(
+                     config,
+                     in_channels=channels[i],
+                     out_channels=channels[i + 1],
+                     stride=layer_stride,
+                     dilation=layer_dilation,
+                 )
+             )
+
+         if config.finegrained_output and config.depth_multiplier < 1.0:
+             output_channels = 1280
+         else:
+             output_channels = apply_depth_multiplier(config, 1280)
+
+         self.conv_1x1 = MobileNetV2ConvLayer(
+             config,
+             in_channels=channels[-1],
+             out_channels=output_channels,
+             kernel_size=1,
+         )
+
+         self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def _prune_heads(self, heads_to_prune):
+         raise NotImplementedError
+
+     @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=BaseModelOutputWithPoolingAndNoAttention,
+         config_class=_CONFIG_FOR_DOC,
+         modality="vision",
+         expected_output=_EXPECTED_OUTPUT_SHAPE,
+     )
+     def forward(
+         self,
+         pixel_values: Optional[torch.Tensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None:
+             raise ValueError("You have to specify pixel_values")
+
+         hidden_states = self.conv_stem(pixel_values)
+
+         all_hidden_states = () if output_hidden_states else None
+
+         for i, layer_module in enumerate(self.layer):
+             hidden_states = layer_module(hidden_states)
+
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+         last_hidden_state = self.conv_1x1(hidden_states)
+
+         if self.pooler is not None:
+             pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
+         else:
+             pooled_output = None
+
+         if not return_dict:
+             return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
+
+         return BaseModelOutputWithPoolingAndNoAttention(
+             last_hidden_state=last_hidden_state,
+             pooler_output=pooled_output,
+             hidden_states=all_hidden_states,
+         )
+
+
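End to end, the backbone can be exercised like any other vision encoder; a sketch assuming the `google/mobilenet_v2_1.0_224` checkpoint:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV2Model

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2Model.from_pretrained("google/mobilenet_v2_1.0_224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, 1280, 7, 7) for 224x224 inputs
```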
+ @add_start_docstrings(
+     """
+     MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+     ImageNet.
+     """,
+     MOBILENET_V2_START_DOCSTRING,
+ )
+ class MobileNetV2ForImageClassification(MobileNetV2PreTrainedModel):
+     def __init__(self, config: MobileNetV2Config) -> None:
+         super().__init__(config)
+
+         self.num_labels = config.num_labels
+         self.mobilenet_v2 = MobileNetV2Model(config)
+
+         last_hidden_size = self.mobilenet_v2.conv_1x1.convolution.out_channels
+
+         # Classifier head
+         self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
+         self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=ImageClassifierOutputWithNoAttention,
+         config_class=_CONFIG_FOR_DOC,
+         expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+     )
+     def forward(
+         self,
+         pixel_values: Optional[torch.Tensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         labels: Optional[torch.Tensor] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.mobilenet_v2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+         pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+         logits = self.classifier(self.dropout(pooled_output))
+
+         loss = None
+         if labels is not None:
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return ImageClassifierOutputWithNoAttention(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+         )
+
+
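The classification head follows the same pattern, taking the pooled features through dropout and a linear layer; a sketch with the same checkpoint:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```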
+ class MobileNetV2DeepLabV3Plus(nn.Module):
+     """
+     The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image
+     Segmentation" https://arxiv.org/abs/1802.02611
+     """
+
+     def __init__(self, config: MobileNetV2Config) -> None:
+         super().__init__()
+
+         self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1)
+
+         self.conv_pool = MobileNetV2ConvLayer(
+             config,
+             in_channels=apply_depth_multiplier(config, 320),
+             out_channels=256,
+             kernel_size=1,
+             stride=1,
+             use_normalization=True,
+             use_activation="relu",
+             layer_norm_eps=1e-5,
+         )
+
+         self.conv_aspp = MobileNetV2ConvLayer(
+             config,
+             in_channels=apply_depth_multiplier(config, 320),
+             out_channels=256,
+             kernel_size=1,
+             stride=1,
+             use_normalization=True,
+             use_activation="relu",
+             layer_norm_eps=1e-5,
+         )
+
+         self.conv_projection = MobileNetV2ConvLayer(
+             config,
+             in_channels=512,
+             out_channels=256,
+             kernel_size=1,
+             stride=1,
+             use_normalization=True,
+             use_activation="relu",
+             layer_norm_eps=1e-5,
+         )
+
+         self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
+
+         self.classifier = MobileNetV2ConvLayer(
+             config,
+             in_channels=256,
+             out_channels=config.num_labels,
+             kernel_size=1,
+             use_normalization=False,
+             use_activation=False,
+             bias=True,
+         )
+
+     def forward(self, features: torch.Tensor) -> torch.Tensor:
+         spatial_size = features.shape[-2:]
+
+         features_pool = self.avg_pool(features)
+         features_pool = self.conv_pool(features_pool)
+         features_pool = nn.functional.interpolate(
+             features_pool, size=spatial_size, mode="bilinear", align_corners=True
+         )
+
+         features_aspp = self.conv_aspp(features)
+
+         features = torch.cat([features_pool, features_aspp], dim=1)
+
+         features = self.conv_projection(features)
+         features = self.dropout(features)
+         features = self.classifier(features)
+         return features
+
+
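Shape-wise, the pooled branch and the ASPP branch (256 channels each) are concatenated to 512 channels, projected back to 256, then classified; a sketch assuming a 65x65 feature map, roughly what a 513x513 input yields at output stride 8:

```python
import torch
from transformers import MobileNetV2Config
from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MobileNetV2DeepLabV3Plus

head = MobileNetV2DeepLabV3Plus(MobileNetV2Config(num_labels=21))
features = torch.randn(1, 320, 65, 65)  # backbone output: 320 channels at depth multiplier 1.0
logits = head(features)
print(logits.shape)  # torch.Size([1, 21, 65, 65]); all head convolutions are 1x1, so spatial size is kept
```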
+ @add_start_docstrings(
+     """
+     MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
+     """,
+     MOBILENET_V2_START_DOCSTRING,
+ )
+ class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel):
+     def __init__(self, config: MobileNetV2Config) -> None:
+         super().__init__(config)
+
+         self.num_labels = config.num_labels
+         self.mobilenet_v2 = MobileNetV2Model(config, add_pooling_layer=False)
+         self.segmentation_head = MobileNetV2DeepLabV3Plus(config)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         pixel_values: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[tuple, SemanticSegmenterOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+             Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+
+         Returns:
+
+         Examples:
+
+         ```python
+         >>> import torch
+         >>> from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation
+         >>> from PIL import Image
+         >>> import requests
+
+         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+         >>> image = Image.open(requests.get(url, stream=True).raw)
+
+         >>> image_processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
+         >>> model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
+
+         >>> inputs = image_processor(images=image, return_tensors="pt")
+
+         >>> with torch.no_grad():
+         ...     outputs = model(**inputs)
+
+         >>> # logits are of shape (batch_size, num_labels, height, width)
+         >>> logits = outputs.logits
+         ```"""
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.mobilenet_v2(
+             pixel_values,
+             output_hidden_states=True,  # we need the intermediate hidden states
+             return_dict=return_dict,
+         )
+
+         encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+         logits = self.segmentation_head(encoder_hidden_states[-1])
+
+         loss = None
+         if labels is not None:
+             if self.config.num_labels == 1:
+                 raise ValueError("The number of labels should be greater than one")
+             else:
+                 # upsample logits to the images' original size
+                 upsampled_logits = nn.functional.interpolate(
+                     logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
+                 )
+                 loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
+                 loss = loss_fct(upsampled_logits, labels)
+
+         if not return_dict:
+             if output_hidden_states:
+                 output = (logits,) + outputs[1:]
+             else:
+                 output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SemanticSegmenterOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states if output_hidden_states else None,
+             attentions=None,
+         )
venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__init__.py ADDED
@@ -0,0 +1,90 @@
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_torch_available,
+     is_torchaudio_available,
+ )
+
+
+ _import_structure = {
+     "configuration_musicgen_melody": [
+         "MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "MusicgenMelodyConfig",
+         "MusicgenMelodyDecoderConfig",
+     ],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_musicgen_melody"] = [
+         "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "MusicgenMelodyForConditionalGeneration",
+         "MusicgenMelodyForCausalLM",
+         "MusicgenMelodyModel",
+         "MusicgenMelodyPreTrainedModel",
+     ]
+
+ try:
+     if not is_torchaudio_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_musicgen_melody"] = ["MusicgenMelodyFeatureExtractor"]
+     _import_structure["processing_musicgen_melody"] = ["MusicgenMelodyProcessor"]
+
+
+ if TYPE_CHECKING:
+     from .configuration_musicgen_melody import (
+         MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         MusicgenMelodyConfig,
+         MusicgenMelodyDecoderConfig,
+     )
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_musicgen_melody import (
+             MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST,
+             MusicgenMelodyForCausalLM,
+             MusicgenMelodyForConditionalGeneration,
+             MusicgenMelodyModel,
+             MusicgenMelodyPreTrainedModel,
+         )
+
+     try:
+         if not is_torchaudio_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor
+         from .processing_musicgen_melody import MusicgenMelodyProcessor
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
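Because the module object is replaced with a `_LazyModule`, submodules are imported only when a name is first accessed; a sketch of the user-facing effect, assuming `torch` and `torchaudio` are installed:

```python
from transformers.models import musicgen_melody

# always available: only configuration_musicgen_melody is imported here
config_cls = musicgen_melody.MusicgenMelodyConfig

# this attribute access triggers the import of modeling_musicgen_melody (requires torch)
model_cls = musicgen_melody.MusicgenMelodyForConditionalGeneration

# and this one imports processing_musicgen_melody (requires torchaudio)
processor_cls = musicgen_melody.MusicgenMelodyProcessor
```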
venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/configuration_musicgen_melody.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/convert_musicgen_melody_transformers.cpython-310.pyc ADDED
Binary file (7.4 kB).