applied-ai-018 committed
Commit 2c059b9 · verified · 1 Parent(s): f22241b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py +71 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py +124 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py +1008 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py +173 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__init__.py +77 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/convert_bros_to_pytorch.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py +138 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/convert_bros_to_pytorch.py +145 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py +1318 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py +109 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py +75 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py +277 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py +237 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py +43 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py +1553 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py +49 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__init__.py +58 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py +223 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py +1882 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py +170 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/luke/modeling_luke.py +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/nystromformer/configuration_nystromformer.py +132 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/__init__.py +29 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py +349 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/roformer/__init__.py +170 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/roformer/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/roformer/__pycache__/configuration_roformer.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (701 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py ADDED
@@ -0,0 +1,71 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available


_import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert_generation"] = [
        "BertGenerationDecoder",
        "BertGenerationEncoder",
        "BertGenerationPreTrainedModel",
        "load_tf_weights_in_bert_generation",
    ]


if TYPE_CHECKING:
    from .configuration_bert_generation import BertGenerationConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_generation import BertGenerationTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert_generation import (
            BertGenerationDecoder,
            BertGenerationEncoder,
            BertGenerationPreTrainedModel,
            load_tf_weights_in_bert_generation,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
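For orientation alongside this diff: the `_LazyModule` registration above means a submodule is only imported the first time one of its attributes is accessed, and the tokenizer/model entries are skipped when sentencepiece or torch is unavailable. A minimal, hypothetical usage sketch (not part of the committed file; assumes transformers, torch and sentencepiece are installed):

    # Hypothetical sketch: these names resolve lazily through the _LazyModule above.
    from transformers.models.bert_generation import BertGenerationConfig, BertGenerationEncoder

    config = BertGenerationConfig()        # defaults: 24 layers, hidden_size=1024, vocab_size=50358
    model = BertGenerationEncoder(config)  # randomly initialized weights, no checkpoint download
    print(type(model).__name__, config.hidden_size)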
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc ADDED
Binary file (5.66 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc ADDED
Binary file (31.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc ADDED
Binary file (6.89 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py ADDED
@@ -0,0 +1,124 @@
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BertGeneration model configuration"""

from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
    instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
    [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50358):
            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`BertGeneration`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.

    Examples:

    ```python
    >>> from transformers import BertGenerationConfig, BertGenerationEncoder

    >>> # Initializing a BertGeneration config
    >>> configuration = BertGenerationConfig()

    >>> # Initializing a model (with random weights) from the config
    >>> model = BertGenerationEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
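Note that the decoder-related switches (`is_decoder`, `add_cross_attention`) are not named in `__init__` above; they travel through `**kwargs` into `PretrainedConfig.__init__`. A minimal, hypothetical sketch of a decoder-style configuration (not part of the committed file; assumes transformers and torch are installed):

    # Hypothetical sketch: decoder flags are consumed by PretrainedConfig via **kwargs.
    from transformers import BertGenerationConfig, BertGenerationDecoder

    decoder_config = BertGenerationConfig(is_decoder=True, add_cross_attention=True)
    decoder = BertGenerationDecoder(decoder_config)  # adds the LM head and enables cross-attention layers
    print(decoder.config.is_decoder, decoder.config.add_cross_attention)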
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py ADDED
@@ -0,0 +1,1008 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch BERT model specific for generation."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import CrossEntropyLoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import (
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_bert_generation import BertGenerationConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder"
42
+ _CONFIG_FOR_DOC = "BertGenerationConfig"
43
+
44
+
45
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration
46
+ class BertGenerationSelfOutput(nn.Module):
47
+ def __init__(self, config):
48
+ super().__init__()
49
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
50
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
51
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
52
+
53
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
54
+ hidden_states = self.dense(hidden_states)
55
+ hidden_states = self.dropout(hidden_states)
56
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
57
+ return hidden_states
58
+
59
+
60
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
61
+ class BertGenerationSelfAttention(nn.Module):
62
+ def __init__(self, config, position_embedding_type=None):
63
+ super().__init__()
64
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
65
+ raise ValueError(
66
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
67
+ f"heads ({config.num_attention_heads})"
68
+ )
69
+
70
+ self.num_attention_heads = config.num_attention_heads
71
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
72
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
73
+
74
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
75
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
76
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
77
+
78
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
79
+ self.position_embedding_type = position_embedding_type or getattr(
80
+ config, "position_embedding_type", "absolute"
81
+ )
82
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
83
+ self.max_position_embeddings = config.max_position_embeddings
84
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
85
+
86
+ self.is_decoder = config.is_decoder
87
+
88
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
89
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
90
+ x = x.view(new_x_shape)
91
+ return x.permute(0, 2, 1, 3)
92
+
93
+ def forward(
94
+ self,
95
+ hidden_states: torch.Tensor,
96
+ attention_mask: Optional[torch.FloatTensor] = None,
97
+ head_mask: Optional[torch.FloatTensor] = None,
98
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
99
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
100
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
101
+ output_attentions: Optional[bool] = False,
102
+ ) -> Tuple[torch.Tensor]:
103
+ mixed_query_layer = self.query(hidden_states)
104
+
105
+ # If this is instantiated as a cross-attention module, the keys
106
+ # and values come from an encoder; the attention mask needs to be
107
+ # such that the encoder's padding tokens are not attended to.
108
+ is_cross_attention = encoder_hidden_states is not None
109
+
110
+ if is_cross_attention and past_key_value is not None:
111
+ # reuse k,v, cross_attentions
112
+ key_layer = past_key_value[0]
113
+ value_layer = past_key_value[1]
114
+ attention_mask = encoder_attention_mask
115
+ elif is_cross_attention:
116
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
117
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
118
+ attention_mask = encoder_attention_mask
119
+ elif past_key_value is not None:
120
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
121
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
122
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
123
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
124
+ else:
125
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
126
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
127
+
128
+ query_layer = self.transpose_for_scores(mixed_query_layer)
129
+
130
+ use_cache = past_key_value is not None
131
+ if self.is_decoder:
132
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
133
+ # Further calls to cross_attention layer can then reuse all cross-attention
134
+ # key/value_states (first "if" case)
135
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
136
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
137
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
138
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
139
+ past_key_value = (key_layer, value_layer)
140
+
141
+ # Take the dot product between "query" and "key" to get the raw attention scores.
142
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
143
+
144
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
145
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
146
+ if use_cache:
147
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
148
+ -1, 1
149
+ )
150
+ else:
151
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
152
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
153
+ distance = position_ids_l - position_ids_r
154
+
155
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
156
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
157
+
158
+ if self.position_embedding_type == "relative_key":
159
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
160
+ attention_scores = attention_scores + relative_position_scores
161
+ elif self.position_embedding_type == "relative_key_query":
162
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
163
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
164
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
165
+
166
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
167
+ if attention_mask is not None:
168
+ # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function)
169
+ attention_scores = attention_scores + attention_mask
170
+
171
+ # Normalize the attention scores to probabilities.
172
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
173
+
174
+ # This is actually dropping out entire tokens to attend to, which might
175
+ # seem a bit unusual, but is taken from the original Transformer paper.
176
+ attention_probs = self.dropout(attention_probs)
177
+
178
+ # Mask heads if we want to
179
+ if head_mask is not None:
180
+ attention_probs = attention_probs * head_mask
181
+
182
+ context_layer = torch.matmul(attention_probs, value_layer)
183
+
184
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
185
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
186
+ context_layer = context_layer.view(new_context_layer_shape)
187
+
188
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
189
+
190
+ if self.is_decoder:
191
+ outputs = outputs + (past_key_value,)
192
+ return outputs
193
+
194
+
195
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration
196
+ class BertGenerationAttention(nn.Module):
197
+ def __init__(self, config, position_embedding_type=None):
198
+ super().__init__()
199
+ self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type)
200
+ self.output = BertGenerationSelfOutput(config)
201
+ self.pruned_heads = set()
202
+
203
+ def prune_heads(self, heads):
204
+ if len(heads) == 0:
205
+ return
206
+ heads, index = find_pruneable_heads_and_indices(
207
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
208
+ )
209
+
210
+ # Prune linear layers
211
+ self.self.query = prune_linear_layer(self.self.query, index)
212
+ self.self.key = prune_linear_layer(self.self.key, index)
213
+ self.self.value = prune_linear_layer(self.self.value, index)
214
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
215
+
216
+ # Update hyper params and store pruned heads
217
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
218
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
219
+ self.pruned_heads = self.pruned_heads.union(heads)
220
+
221
+ def forward(
222
+ self,
223
+ hidden_states: torch.Tensor,
224
+ attention_mask: Optional[torch.FloatTensor] = None,
225
+ head_mask: Optional[torch.FloatTensor] = None,
226
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
227
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
228
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
229
+ output_attentions: Optional[bool] = False,
230
+ ) -> Tuple[torch.Tensor]:
231
+ self_outputs = self.self(
232
+ hidden_states,
233
+ attention_mask,
234
+ head_mask,
235
+ encoder_hidden_states,
236
+ encoder_attention_mask,
237
+ past_key_value,
238
+ output_attentions,
239
+ )
240
+ attention_output = self.output(self_outputs[0], hidden_states)
241
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
242
+ return outputs
243
+
244
+
245
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration
246
+ class BertGenerationIntermediate(nn.Module):
247
+ def __init__(self, config):
248
+ super().__init__()
249
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
250
+ if isinstance(config.hidden_act, str):
251
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
252
+ else:
253
+ self.intermediate_act_fn = config.hidden_act
254
+
255
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
256
+ hidden_states = self.dense(hidden_states)
257
+ hidden_states = self.intermediate_act_fn(hidden_states)
258
+ return hidden_states
259
+
260
+
261
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration
262
+ class BertGenerationOutput(nn.Module):
263
+ def __init__(self, config):
264
+ super().__init__()
265
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
266
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
267
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
268
+
269
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
270
+ hidden_states = self.dense(hidden_states)
271
+ hidden_states = self.dropout(hidden_states)
272
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
273
+ return hidden_states
274
+
275
+
276
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
277
+ class BertGenerationLayer(nn.Module):
278
+ def __init__(self, config):
279
+ super().__init__()
280
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
281
+ self.seq_len_dim = 1
282
+ self.attention = BertGenerationAttention(config)
283
+ self.is_decoder = config.is_decoder
284
+ self.add_cross_attention = config.add_cross_attention
285
+ if self.add_cross_attention:
286
+ if not self.is_decoder:
287
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
288
+ self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute")
289
+ self.intermediate = BertGenerationIntermediate(config)
290
+ self.output = BertGenerationOutput(config)
291
+
292
+ def forward(
293
+ self,
294
+ hidden_states: torch.Tensor,
295
+ attention_mask: Optional[torch.FloatTensor] = None,
296
+ head_mask: Optional[torch.FloatTensor] = None,
297
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
298
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
299
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
300
+ output_attentions: Optional[bool] = False,
301
+ ) -> Tuple[torch.Tensor]:
302
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
303
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
304
+ self_attention_outputs = self.attention(
305
+ hidden_states,
306
+ attention_mask,
307
+ head_mask,
308
+ output_attentions=output_attentions,
309
+ past_key_value=self_attn_past_key_value,
310
+ )
311
+ attention_output = self_attention_outputs[0]
312
+
313
+ # if decoder, the last output is tuple of self-attn cache
314
+ if self.is_decoder:
315
+ outputs = self_attention_outputs[1:-1]
316
+ present_key_value = self_attention_outputs[-1]
317
+ else:
318
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
319
+
320
+ cross_attn_present_key_value = None
321
+ if self.is_decoder and encoder_hidden_states is not None:
322
+ if not hasattr(self, "crossattention"):
323
+ raise ValueError(
324
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
325
+ " by setting `config.add_cross_attention=True`"
326
+ )
327
+
328
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
329
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
330
+ cross_attention_outputs = self.crossattention(
331
+ attention_output,
332
+ attention_mask,
333
+ head_mask,
334
+ encoder_hidden_states,
335
+ encoder_attention_mask,
336
+ cross_attn_past_key_value,
337
+ output_attentions,
338
+ )
339
+ attention_output = cross_attention_outputs[0]
340
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
341
+
342
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
343
+ cross_attn_present_key_value = cross_attention_outputs[-1]
344
+ present_key_value = present_key_value + cross_attn_present_key_value
345
+
346
+ layer_output = apply_chunking_to_forward(
347
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
348
+ )
349
+ outputs = (layer_output,) + outputs
350
+
351
+ # if decoder, return the attn key/values as the last output
352
+ if self.is_decoder:
353
+ outputs = outputs + (present_key_value,)
354
+
355
+ return outputs
356
+
357
+ def feed_forward_chunk(self, attention_output):
358
+ intermediate_output = self.intermediate(attention_output)
359
+ layer_output = self.output(intermediate_output, attention_output)
360
+ return layer_output
361
+
362
+
363
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration
364
+ class BertEncoder(nn.Module):
365
+ def __init__(self, config):
366
+ super().__init__()
367
+ self.config = config
368
+ self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)])
369
+ self.gradient_checkpointing = False
370
+
371
+ def forward(
372
+ self,
373
+ hidden_states: torch.Tensor,
374
+ attention_mask: Optional[torch.FloatTensor] = None,
375
+ head_mask: Optional[torch.FloatTensor] = None,
376
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
377
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
378
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
379
+ use_cache: Optional[bool] = None,
380
+ output_attentions: Optional[bool] = False,
381
+ output_hidden_states: Optional[bool] = False,
382
+ return_dict: Optional[bool] = True,
383
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
384
+ all_hidden_states = () if output_hidden_states else None
385
+ all_self_attentions = () if output_attentions else None
386
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
387
+
388
+ if self.gradient_checkpointing and self.training:
389
+ if use_cache:
390
+ logger.warning_once(
391
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
392
+ )
393
+ use_cache = False
394
+
395
+ next_decoder_cache = () if use_cache else None
396
+ for i, layer_module in enumerate(self.layer):
397
+ if output_hidden_states:
398
+ all_hidden_states = all_hidden_states + (hidden_states,)
399
+
400
+ layer_head_mask = head_mask[i] if head_mask is not None else None
401
+ past_key_value = past_key_values[i] if past_key_values is not None else None
402
+
403
+ if self.gradient_checkpointing and self.training:
404
+ layer_outputs = self._gradient_checkpointing_func(
405
+ layer_module.__call__,
406
+ hidden_states,
407
+ attention_mask,
408
+ layer_head_mask,
409
+ encoder_hidden_states,
410
+ encoder_attention_mask,
411
+ past_key_value,
412
+ output_attentions,
413
+ )
414
+ else:
415
+ layer_outputs = layer_module(
416
+ hidden_states,
417
+ attention_mask,
418
+ layer_head_mask,
419
+ encoder_hidden_states,
420
+ encoder_attention_mask,
421
+ past_key_value,
422
+ output_attentions,
423
+ )
424
+
425
+ hidden_states = layer_outputs[0]
426
+ if use_cache:
427
+ next_decoder_cache += (layer_outputs[-1],)
428
+ if output_attentions:
429
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
430
+ if self.config.add_cross_attention:
431
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
432
+
433
+ if output_hidden_states:
434
+ all_hidden_states = all_hidden_states + (hidden_states,)
435
+
436
+ if not return_dict:
437
+ return tuple(
438
+ v
439
+ for v in [
440
+ hidden_states,
441
+ next_decoder_cache,
442
+ all_hidden_states,
443
+ all_self_attentions,
444
+ all_cross_attentions,
445
+ ]
446
+ if v is not None
447
+ )
448
+ return BaseModelOutputWithPastAndCrossAttentions(
449
+ last_hidden_state=hidden_states,
450
+ past_key_values=next_decoder_cache,
451
+ hidden_states=all_hidden_states,
452
+ attentions=all_self_attentions,
453
+ cross_attentions=all_cross_attentions,
454
+ )
455
+
456
+
457
+ def load_tf_weights_in_bert_generation(
458
+ model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
459
+ ):
460
+ try:
461
+ import numpy as np
462
+ import tensorflow.compat.v1 as tf
463
+ import tensorflow_hub as hub
464
+ import tensorflow_text # noqa: F401
465
+
466
+ tf.disable_eager_execution()
467
+ except ImportError:
468
+ logger.error(
469
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
470
+ "https://www.tensorflow.org/install/ for installation instructions."
471
+ )
472
+ raise
473
+ tf_model = hub.Module(tf_hub_path)
474
+ init = tf.global_variables_initializer()
475
+ with tf.Session() as sess:
476
+ init.run()
477
+ all_variables = tf_model.variable_map
478
+ keep_track_variables = all_variables.copy()
479
+ for key in list(all_variables.keys()):
480
+ if "global" in key:
481
+ logger.info(f"Skipping {key}...")
482
+ continue
483
+ if not is_encoder:
484
+ model_pointer = getattr(model, model_class)
485
+ else:
486
+ model_pointer = model
487
+ is_embedding = False
488
+ logger.info(f"Trying to match {key}...")
489
+ # remove start_string = "module/bert/"
490
+ sub_layers = key.split("/")[2:]
491
+ if is_encoder_named_decoder and sub_layers[0] == "encoder":
492
+ logger.info(f"Skipping encoder layer {key} for decoder")
493
+ continue
494
+ if is_encoder and sub_layers[0] == "decoder":
495
+ logger.info(f"Skipping decoder layer {key} for encoder")
496
+ continue
497
+ for i, sub_layer in enumerate(sub_layers):
498
+ if sub_layer == "embeddings":
499
+ is_embedding = True
500
+ elif sub_layer == "LayerNorm":
501
+ is_embedding = False
502
+ if "layer" in sub_layer:
503
+ model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])]
504
+ elif sub_layer in ["kernel", "gamma"]:
505
+ model_pointer = model_pointer.weight
506
+ elif sub_layer == "beta":
507
+ model_pointer = model_pointer.bias
508
+ elif sub_layer == "encdec":
509
+ model_pointer = model_pointer.crossattention.self
510
+ elif sub_layer == "encdec_output":
511
+ model_pointer = model_pointer.crossattention.output
512
+ elif is_encoder_named_decoder and sub_layer == "decoder":
513
+ model_pointer = model_pointer.encoder
514
+ else:
515
+ if sub_layer == "attention" and "encdec" in sub_layers[i + 1]:
516
+ continue
517
+ try:
518
+ model_pointer = getattr(model_pointer, sub_layer)
519
+ except AttributeError:
520
+ logger.info(f"Skipping to initialize {key} at {sub_layer}...")
521
+ raise AttributeError
522
+
523
+ array = np.asarray(sess.run(all_variables[key]))
524
+ if not is_embedding:
525
+ logger.info(f"Transposing numpy weight of shape {array.shape} for {key}")
526
+ array = np.transpose(array)
527
+ else:
528
+ model_pointer = model_pointer.weight
529
+
530
+ if model_pointer.shape != array.shape:
531
+ raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
532
+ logger.info(f"Initialize PyTorch weight {key}")
533
+
534
+ model_pointer.data = torch.from_numpy(array.astype(np.float32))
535
+ keep_track_variables.pop(key, None)
536
+
537
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}")
538
+ return model
539
+
540
+
541
+ class BertGenerationEmbeddings(nn.Module):
542
+ """Construct the embeddings from word and position embeddings."""
543
+
544
+ def __init__(self, config):
545
+ super().__init__()
546
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
547
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
548
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
549
+ # any TensorFlow checkpoint file
550
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
551
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
552
+
553
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
554
+ self.register_buffer(
555
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
556
+ )
557
+
558
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
559
+ if input_ids is not None:
560
+ input_shape = input_ids.size()
561
+ else:
562
+ input_shape = inputs_embeds.size()[:-1]
563
+
564
+ seq_length = input_shape[1]
565
+
566
+ if position_ids is None:
567
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
568
+
569
+ if inputs_embeds is None:
570
+ inputs_embeds = self.word_embeddings(input_ids)
571
+ position_embeddings = self.position_embeddings(position_ids)
572
+
573
+ embeddings = inputs_embeds + position_embeddings
574
+ embeddings = self.LayerNorm(embeddings)
575
+ embeddings = self.dropout(embeddings)
576
+ return embeddings
577
+
578
+
579
+ class BertGenerationPreTrainedModel(PreTrainedModel):
580
+ """
581
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
582
+ models.
583
+ """
584
+
585
+ config_class = BertGenerationConfig
586
+ base_model_prefix = "bert"
587
+ supports_gradient_checkpointing = True
588
+
589
+ def _init_weights(self, module):
590
+ """Initialize the weights"""
591
+ if isinstance(module, nn.Linear):
592
+ # Slightly different from the TF version which uses truncated_normal for initialization
593
+ # cf https://github.com/pytorch/pytorch/pull/5617
594
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
595
+ if module.bias is not None:
596
+ module.bias.data.zero_()
597
+ elif isinstance(module, nn.Embedding):
598
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
599
+ if module.padding_idx is not None:
600
+ module.weight.data[module.padding_idx].zero_()
601
+ elif isinstance(module, nn.LayerNorm):
602
+ module.bias.data.zero_()
603
+ module.weight.data.fill_(1.0)
604
+
605
+
606
+ BERT_GENERATION_START_DOCSTRING = r"""
607
+
608
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
609
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
610
+ etc.)
611
+
612
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
613
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
614
+ and behavior.
615
+
616
+ Parameters:
617
+ config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model.
618
+ Initializing with a config file does not load the weights associated with the model, only the
619
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
620
+ """
621
+
622
+ BERT_GENERATION_INPUTS_DOCSTRING = r"""
623
+ Args:
624
+ input_ids (`torch.LongTensor` of shape `({0})`):
625
+ Indices of input sequence tokens in the vocabulary.
626
+
627
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
628
+ [`PreTrainedTokenizer.encode`] for details.
629
+
630
+ [What are input IDs?](../glossary#input-ids)
631
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
632
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ [What are attention masks?](../glossary#attention-mask)
638
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
639
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
640
+ config.max_position_embeddings - 1]`.
641
+
642
+ [What are position IDs?](../glossary#position-ids)
643
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
644
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
645
+
646
+ - 1 indicates the head is **not masked**,
647
+ - 0 indicates the head is **masked**.
648
+
649
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
650
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
651
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
652
+ model's internal embedding lookup matrix.
653
+ output_attentions (`bool`, *optional*):
654
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
655
+ tensors for more detail.
656
+ output_hidden_states (`bool`, *optional*):
657
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
658
+ more detail.
659
+ return_dict (`bool`, *optional*):
660
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
661
+ """
662
+
663
+
664
+ @add_start_docstrings(
665
+ "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
666
+ BERT_GENERATION_START_DOCSTRING,
667
+ )
668
+ class BertGenerationEncoder(BertGenerationPreTrainedModel):
669
+ """
670
+
671
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
672
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
673
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
674
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
675
+
676
+ This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as
677
+ described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461)
678
+ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
679
+
680
+ To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
681
+ to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
682
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
683
+ """
684
+
685
+ def __init__(self, config):
686
+ super().__init__(config)
687
+ self.config = config
688
+
689
+ self.embeddings = BertGenerationEmbeddings(config)
690
+ self.encoder = BertEncoder(config)
691
+
692
+ # Initialize weights and apply final processing
693
+ self.post_init()
694
+
695
+ def get_input_embeddings(self):
696
+ return self.embeddings.word_embeddings
697
+
698
+ def set_input_embeddings(self, value):
699
+ self.embeddings.word_embeddings = value
700
+
701
+ def _prune_heads(self, heads_to_prune):
702
+ """
703
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
704
+ class PreTrainedModel
705
+ """
706
+ for layer, heads in heads_to_prune.items():
707
+ self.encoder.layer[layer].attention.prune_heads(heads)
708
+
709
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
710
+ @add_code_sample_docstrings(
711
+ checkpoint=_CHECKPOINT_FOR_DOC,
712
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
713
+ config_class=_CONFIG_FOR_DOC,
714
+ )
715
+ def forward(
716
+ self,
717
+ input_ids: Optional[torch.Tensor] = None,
718
+ attention_mask: Optional[torch.Tensor] = None,
719
+ position_ids: Optional[torch.Tensor] = None,
720
+ head_mask: Optional[torch.Tensor] = None,
721
+ inputs_embeds: Optional[torch.Tensor] = None,
722
+ encoder_hidden_states: Optional[torch.Tensor] = None,
723
+ encoder_attention_mask: Optional[torch.Tensor] = None,
724
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
725
+ use_cache: Optional[bool] = None,
726
+ output_attentions: Optional[bool] = None,
727
+ output_hidden_states: Optional[bool] = None,
728
+ return_dict: Optional[bool] = None,
729
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
730
+ r"""
731
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
732
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
733
+ the model is configured as a decoder.
734
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
735
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
736
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for
737
+ tokens that are NOT MASKED, `0` for MASKED tokens.
738
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
739
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
740
+
741
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
742
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
743
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
744
+ use_cache (`bool`, *optional*):
745
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
746
+ `past_key_values`).
747
+ """
748
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
749
+ output_hidden_states = (
750
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
751
+ )
752
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
753
+
754
+ if self.config.is_decoder:
755
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
756
+ else:
757
+ use_cache = False
758
+
759
+ if input_ids is not None and inputs_embeds is not None:
760
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
761
+ elif input_ids is not None:
762
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
763
+ input_shape = input_ids.size()
764
+ elif inputs_embeds is not None:
765
+ input_shape = inputs_embeds.size()[:-1]
766
+ else:
767
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
768
+
769
+ batch_size, seq_length = input_shape
770
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
771
+
772
+ # past_key_values_length
773
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
774
+
775
+ if attention_mask is None:
776
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
777
+
778
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
779
+ # ourselves in which case we just need to make it broadcastable to all heads.
780
+ extended_attention_mask = None
781
+ if not use_cache:
782
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
783
+
784
+ # If a 2D or 3D attention mask is provided for the cross-attention
785
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
786
+ if self.config.is_decoder and encoder_hidden_states is not None:
787
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
788
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
789
+ if encoder_attention_mask is None:
790
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
791
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
792
+ else:
793
+ encoder_extended_attention_mask = None
794
+
795
+ # Prepare head mask if needed
796
+ # 1.0 in head_mask indicate we keep the head
797
+ # attention_probs has shape bsz x n_heads x N x N
798
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
799
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
800
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
801
+
802
+ embedding_output = self.embeddings(
803
+ input_ids=input_ids,
804
+ position_ids=position_ids,
805
+ inputs_embeds=inputs_embeds,
806
+ past_key_values_length=past_key_values_length,
807
+ )
808
+
809
+ encoder_outputs = self.encoder(
810
+ embedding_output,
811
+ attention_mask=extended_attention_mask,
812
+ head_mask=head_mask,
813
+ encoder_hidden_states=encoder_hidden_states,
814
+ encoder_attention_mask=encoder_extended_attention_mask,
815
+ past_key_values=past_key_values,
816
+ use_cache=use_cache,
817
+ output_attentions=output_attentions,
818
+ output_hidden_states=output_hidden_states,
819
+ return_dict=return_dict,
820
+ )
821
+ sequence_output = encoder_outputs[0]
822
+
823
+ if not return_dict:
824
+ return (sequence_output,) + encoder_outputs[1:]
825
+
826
+ return BaseModelOutputWithPastAndCrossAttentions(
827
+ last_hidden_state=sequence_output,
828
+ past_key_values=encoder_outputs.past_key_values,
829
+ hidden_states=encoder_outputs.hidden_states,
830
+ attentions=encoder_outputs.attentions,
831
+ cross_attentions=encoder_outputs.cross_attentions,
832
+ )
833
+
834
+
835
+ class BertGenerationOnlyLMHead(nn.Module):
836
+ def __init__(self, config):
837
+ super().__init__()
838
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
839
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
840
+ self.decoder.bias = self.bias
841
+
842
+ def forward(self, hidden_states):
843
+ logits = self.decoder(hidden_states)
844
+ return logits
845
+
846
+ def _tie_weights(self):
847
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
848
+ self.bias = self.decoder.bias
849
+
850
+
851
+ @add_start_docstrings(
852
+ """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""",
853
+ BERT_GENERATION_START_DOCSTRING,
854
+ )
855
+ class BertGenerationDecoder(BertGenerationPreTrainedModel):
856
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
857
+
858
+ def __init__(self, config):
859
+ super().__init__(config)
860
+
861
+ if not config.is_decoder:
862
+ logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True`.")
863
+
864
+ self.bert = BertGenerationEncoder(config)
865
+ self.lm_head = BertGenerationOnlyLMHead(config)
866
+
867
+ # Initialize weights and apply final processing
868
+ self.post_init()
869
+
870
+ def get_output_embeddings(self):
871
+ return self.lm_head.decoder
872
+
873
+ def set_output_embeddings(self, new_embeddings):
874
+ self.lm_head.decoder = new_embeddings
875
+
876
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
877
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
878
+ def forward(
879
+ self,
880
+ input_ids: Optional[torch.Tensor] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ position_ids: Optional[torch.Tensor] = None,
883
+ head_mask: Optional[torch.Tensor] = None,
884
+ inputs_embeds: Optional[torch.Tensor] = None,
885
+ encoder_hidden_states: Optional[torch.Tensor] = None,
886
+ encoder_attention_mask: Optional[torch.Tensor] = None,
887
+ labels: Optional[torch.Tensor] = None,
888
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
889
+ use_cache: Optional[bool] = None,
890
+ output_attentions: Optional[bool] = None,
891
+ output_hidden_states: Optional[bool] = None,
892
+ return_dict: Optional[bool] = None,
893
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
894
+ r"""
895
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
896
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
897
+ the model is configured as a decoder.
898
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
899
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
900
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
901
+
902
+ - 1 for tokens that are **not masked**,
903
+ - 0 for tokens that are **masked**.
904
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
905
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
906
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
907
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
908
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
909
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
910
+
911
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
912
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
913
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
914
+ use_cache (`bool`, *optional*):
915
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
916
+ `past_key_values`).
917
+
918
+ Returns:
919
+
920
+ Example:
921
+
922
+ ```python
923
+ >>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
924
+ >>> import torch
925
+
926
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
927
+ >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
928
+ >>> config.is_decoder = True
929
+ >>> model = BertGenerationDecoder.from_pretrained(
930
+ ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
931
+ ... )
932
+
933
+ >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
934
+ >>> outputs = model(**inputs)
935
+
936
+ >>> prediction_logits = outputs.logits
937
+ ```"""
938
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
939
+ if labels is not None:
940
+ use_cache = False
941
+
942
+ outputs = self.bert(
943
+ input_ids,
944
+ attention_mask=attention_mask,
945
+ position_ids=position_ids,
946
+ head_mask=head_mask,
947
+ inputs_embeds=inputs_embeds,
948
+ encoder_hidden_states=encoder_hidden_states,
949
+ encoder_attention_mask=encoder_attention_mask,
950
+ past_key_values=past_key_values,
951
+ use_cache=use_cache,
952
+ output_attentions=output_attentions,
953
+ output_hidden_states=output_hidden_states,
954
+ return_dict=return_dict,
955
+ )
956
+
957
+ sequence_output = outputs[0]
958
+ prediction_scores = self.lm_head(sequence_output)
959
+
960
+ lm_loss = None
961
+ if labels is not None:
962
+ # we are doing next-token prediction; shift prediction scores and input ids by one
963
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
964
+ labels = labels[:, 1:].contiguous()
965
+ loss_fct = CrossEntropyLoss()
966
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
967
+
968
+ if not return_dict:
969
+ output = (prediction_scores,) + outputs[1:]
970
+ return ((lm_loss,) + output) if lm_loss is not None else output
971
+
972
+ return CausalLMOutputWithCrossAttentions(
973
+ loss=lm_loss,
974
+ logits=prediction_scores,
975
+ past_key_values=outputs.past_key_values,
976
+ hidden_states=outputs.hidden_states,
977
+ attentions=outputs.attentions,
978
+ cross_attentions=outputs.cross_attentions,
979
+ )
980
+
981
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
982
+ input_shape = input_ids.shape
983
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
984
+ if attention_mask is None:
985
+ attention_mask = input_ids.new_ones(input_shape)
986
+
987
+ # cut decoder_input_ids if past_key_values is used
988
+ if past_key_values is not None:
989
+ past_length = past_key_values[0][0].shape[2]
990
+
991
+ # Some generation methods already pass only the last input ID
992
+ if input_ids.shape[1] > past_length:
993
+ remove_prefix_length = past_length
994
+ else:
995
+ # Default to old behavior: keep only final ID
996
+ remove_prefix_length = input_ids.shape[1] - 1
997
+
998
+ input_ids = input_ids[:, remove_prefix_length:]
999
+
1000
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1001
+
1002
+ def _reorder_cache(self, past_key_values, beam_idx):
1003
+ reordered_past = ()
1004
+ for layer_past in past_key_values:
1005
+ reordered_past += (
1006
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1007
+ )
1008
+ return reordered_past
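`BertGenerationEncoder` and `BertGenerationDecoder` are usually combined through `EncoderDecoderModel` for seq2seq fine-tuning. A minimal sketch of that wiring, reusing the `google/bert_for_seq_generation_L-24_bbc_encoder` checkpoint from the docstring example above (the loss comes from the shifted next-token prediction implemented in `BertGenerationDecoder.forward`):

```python
from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel

checkpoint = "google/bert_for_seq_generation_L-24_bbc_encoder"

# The encoder stays bidirectional; the decoder needs is_decoder=True and cross-attention layers.
encoder = BertGenerationEncoder.from_pretrained(checkpoint)
decoder = BertGenerationDecoder.from_pretrained(checkpoint, is_decoder=True, add_cross_attention=True)
model = EncoderDecoderModel(encoder=encoder, decoder=decoder)

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
inputs = tokenizer("A long input document to condense.", return_token_type_ids=False, return_tensors="pt")
labels = tokenizer("A short summary.", return_token_type_ids=False, return_tensors="pt").input_ids

# Passing labels triggers the shifted cross-entropy loss shown in the decoder's forward() above.
loss = model(input_ids=inputs.input_ids, decoder_input_ids=labels, labels=labels).loss
```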
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py ADDED
@@ -0,0 +1,173 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model BertGeneration."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
31
+
32
+
33
+ class BertGenerationTokenizer(PreTrainedTokenizer):
34
+ """
35
+ Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
36
+
37
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
38
+ this superclass for more information regarding those methods.
39
+
40
+ Args:
41
+ vocab_file (`str`):
42
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
43
+ contains the vocabulary necessary to instantiate a tokenizer.
44
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
45
+ The begin of sequence token.
46
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
47
+ The end of sequence token.
48
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
49
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
50
+ token instead.
51
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
52
+ The token used for padding, for example when batching sequences of different lengths.
53
+ sep_token (`str`, *optional*, defaults to `"<::::>"`):
54
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
55
+ sequence classification or for a text and a question for question answering. It is also used as the last
56
+ token of a sequence built with special tokens.
57
+ sp_model_kwargs (`dict`, *optional*):
58
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
59
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
60
+ to set:
61
+
62
+ - `enable_sampling`: Enable subword regularization.
63
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
64
+
65
+ - `nbest_size = {0,1}`: No sampling is performed.
66
+ - `nbest_size > 1`: samples from the nbest_size results.
67
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
68
+ using forward-filtering-and-backward-sampling algorithm.
69
+
70
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
71
+ BPE-dropout.
72
+ """
73
+
74
+ vocab_files_names = VOCAB_FILES_NAMES
75
+ prefix_tokens: List[int] = []
76
+ model_input_names = ["input_ids", "attention_mask"]
77
+
78
+ def __init__(
79
+ self,
80
+ vocab_file,
81
+ bos_token="<s>",
82
+ eos_token="</s>",
83
+ unk_token="<unk>",
84
+ pad_token="<pad>",
85
+ sep_token="<::::>",
86
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
87
+ **kwargs,
88
+ ) -> None:
89
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
90
+
91
+ self.vocab_file = vocab_file
92
+
93
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
94
+ self.sp_model.Load(vocab_file)
95
+
96
+ # Add extra_ids to the special token list
97
+ super().__init__(
98
+ bos_token=bos_token,
99
+ eos_token=eos_token,
100
+ unk_token=unk_token,
101
+ pad_token=pad_token,
102
+ sep_token=sep_token,
103
+ sp_model_kwargs=self.sp_model_kwargs,
104
+ **kwargs,
105
+ )
106
+
107
+ @property
108
+ def vocab_size(self):
109
+ return self.sp_model.get_piece_size()
110
+
111
+ def get_vocab(self):
112
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
113
+ vocab.update(self.added_tokens_encoder)
114
+ return vocab
115
+
116
+ def __getstate__(self):
117
+ state = self.__dict__.copy()
118
+ state["sp_model"] = None
119
+ return state
120
+
121
+ def __setstate__(self, d):
122
+ self.__dict__ = d
123
+
124
+ # for backward compatibility
125
+ if not hasattr(self, "sp_model_kwargs"):
126
+ self.sp_model_kwargs = {}
127
+
128
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
129
+ self.sp_model.Load(self.vocab_file)
130
+
131
+ def _tokenize(self, text: str) -> List[str]:
132
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
133
+ return self.sp_model.encode(text, out_type=str)
134
+
135
+ def _convert_token_to_id(self, token):
136
+ """Converts a token (str) in an id using the vocab."""
137
+ return self.sp_model.piece_to_id(token)
138
+
139
+ def _convert_id_to_token(self, index):
140
+ """Converts an index (integer) in a token (str) using the vocab."""
141
+ token = self.sp_model.IdToPiece(index)
142
+ return token
143
+
144
+ def convert_tokens_to_string(self, tokens):
145
+ """Converts a sequence of tokens (string) in a single string."""
146
+ current_sub_tokens = []
147
+ out_string = ""
148
+ for token in tokens:
149
+ # make sure that special tokens are not decoded using sentencepiece model
150
+ if token in self.all_special_tokens:
151
+ out_string += self.sp_model.decode(current_sub_tokens) + token
152
+ current_sub_tokens = []
153
+ else:
154
+ current_sub_tokens.append(token)
155
+ out_string += self.sp_model.decode(current_sub_tokens)
156
+ return out_string.strip()
157
+
158
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
159
+ if not os.path.isdir(save_directory):
160
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
161
+ return
162
+ out_vocab_file = os.path.join(
163
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
164
+ )
165
+
166
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
167
+ copyfile(self.vocab_file, out_vocab_file)
168
+ elif not os.path.isfile(self.vocab_file):
169
+ with open(out_vocab_file, "wb") as fi:
170
+ content_spiece_model = self.sp_model.serialized_model_proto()
171
+ fi.write(content_spiece_model)
172
+
173
+ return (out_vocab_file,)
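The SentencePiece plumbing above can be exercised end to end through the public tokenizer API; a minimal sketch, assuming the same checkpoint used in the modeling docstrings:

```python
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

tokens = tokenizer.tokenize("Hello, my dog is cute")  # SentencePiece pieces via _tokenize()
ids = tokenizer.convert_tokens_to_ids(tokens)         # piece_to_id() per token
text = tokenizer.convert_tokens_to_string(tokens)     # decoded text; special tokens are passed through verbatim

# save_pretrained() ends up in save_vocabulary(), which copies or re-serializes spiece.model.
tokenizer.save_pretrained("./bert_generation_tokenizer")  # illustrative output directory
```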
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__init__.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_bros": ["BROS_PRETRAINED_CONFIG_ARCHIVE_MAP", "BrosConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_tokenizers_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["processing_bros"] = ["BrosProcessor"]
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_bros"] = [
38
+ "BROS_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "BrosPreTrainedModel",
40
+ "BrosModel",
41
+ "BrosForTokenClassification",
42
+ "BrosSpadeEEForTokenClassification",
43
+ "BrosSpadeELForTokenClassification",
44
+ ]
45
+
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_bros import BROS_PRETRAINED_CONFIG_ARCHIVE_MAP, BrosConfig
49
+
50
+ try:
51
+ if not is_tokenizers_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .processing_bros import BrosProcessor
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_bros import (
65
+ BROS_PRETRAINED_MODEL_ARCHIVE_LIST,
66
+ BrosForTokenClassification,
67
+ BrosModel,
68
+ BrosPreTrainedModel,
69
+ BrosSpadeEEForTokenClassification,
70
+ BrosSpadeELForTokenClassification,
71
+ )
72
+
73
+
74
+ else:
75
+ import sys
76
+
77
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
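The `_LazyModule` registration at the bottom defers the torch-dependent imports until a symbol is first accessed; a minimal sketch of that deferral (the `sys.modules` check is only for illustration):

```python
import importlib
import sys

# Importing the package only installs the lazy proxy; modeling_bros is not imported yet.
bros = importlib.import_module("transformers.models.bros")
print("transformers.models.bros.modeling_bros" in sys.modules)  # expected: False

# First attribute access resolves the entry in _import_structure and imports the real module.
# If torch was unavailable at import time, the entry is never registered and this raises AttributeError.
_ = bros.BrosModel
print("transformers.models.bros.modeling_bros" in sys.modules)  # expected: True
```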
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.24 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc ADDED
Binary file (5.56 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/convert_bros_to_pytorch.cpython-310.pyc ADDED
Binary file (3.33 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc ADDED
Binary file (36.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc ADDED
Binary file (3.59 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py ADDED
@@ -0,0 +1,138 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Bros model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import BROS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class BrosConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. It is used to
30
+ instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the Bros
32
+ [jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 30522):
39
+ Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
41
+ hidden_size (`int`, *optional*, defaults to 768):
42
+ Dimensionality of the encoder layers and the pooler layer.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ intermediate_size (`int`, *optional*, defaults to 3072):
48
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
49
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
50
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
51
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
52
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
53
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
54
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
55
+ The dropout ratio for the attention probabilities.
56
+ max_position_embeddings (`int`, *optional*, defaults to 512):
57
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
58
+ just in case (e.g., 512 or 1024 or 2048).
59
+ type_vocab_size (`int`, *optional*, defaults to 2):
60
+ The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
64
+ The epsilon used by the layer normalization layers.
65
+ pad_token_id (`int`, *optional*, defaults to 0):
66
+ The index of the padding token in the token vocabulary.
67
+ dim_bbox (`int`, *optional*, defaults to 8):
68
+ The dimension of the bounding box coordinates: four (x, y) corner points given as (x0, y0, x1, y0, x1, y1, x0, y1).
69
+ bbox_scale (`float`, *optional*, defaults to 100.0):
70
+ The scale factor of the bounding box coordinates.
71
+ n_relations (`int`, *optional*, defaults to 1):
72
+ The number of relations for SpadeEE(entity extraction), SpadeEL(entity linking) head.
73
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
74
+ The dropout ratio for the classifier head.
75
+
76
+
77
+ Examples:
78
+
79
+ ```python
80
+ >>> from transformers import BrosConfig, BrosModel
81
+
82
+ >>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
83
+ >>> configuration = BrosConfig()
84
+
85
+ >>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
86
+ >>> model = BrosModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```"""
91
+
92
+ model_type = "bros"
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=30522,
97
+ hidden_size=768,
98
+ num_hidden_layers=12,
99
+ num_attention_heads=12,
100
+ intermediate_size=3072,
101
+ hidden_act="gelu",
102
+ hidden_dropout_prob=0.1,
103
+ attention_probs_dropout_prob=0.1,
104
+ max_position_embeddings=512,
105
+ type_vocab_size=2,
106
+ initializer_range=0.02,
107
+ layer_norm_eps=1e-12,
108
+ pad_token_id=0,
109
+ dim_bbox=8,
110
+ bbox_scale=100.0,
111
+ n_relations=1,
112
+ classifier_dropout_prob=0.1,
113
+ **kwargs,
114
+ ):
115
+ super().__init__(
116
+ vocab_size=vocab_size,
117
+ hidden_size=hidden_size,
118
+ num_hidden_layers=num_hidden_layers,
119
+ num_attention_heads=num_attention_heads,
120
+ intermediate_size=intermediate_size,
121
+ hidden_act=hidden_act,
122
+ hidden_dropout_prob=hidden_dropout_prob,
123
+ attention_probs_dropout_prob=attention_probs_dropout_prob,
124
+ max_position_embeddings=max_position_embeddings,
125
+ type_vocab_size=type_vocab_size,
126
+ initializer_range=initializer_range,
127
+ layer_norm_eps=layer_norm_eps,
128
+ pad_token_id=pad_token_id,
129
+ **kwargs,
130
+ )
131
+
132
+ self.dim_bbox = dim_bbox
133
+ self.bbox_scale = bbox_scale
134
+ self.n_relations = n_relations
135
+ self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4
136
+ self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox
137
+ self.dim_bbox_projection = self.hidden_size // self.num_attention_heads
138
+ self.classifier_dropout_prob = classifier_dropout_prob
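The three derived attributes at the end of `__init__` are simple integer divisions of `hidden_size`; a minimal sketch with the default values:

```python
from transformers import BrosConfig

config = BrosConfig()  # hidden_size=768, num_attention_heads=12, dim_bbox=8

assert config.dim_bbox_sinusoid_emb_2d == 768 // 4   # 192: total sinusoid width per relative bbox
assert config.dim_bbox_sinusoid_emb_1d == 192 // 8   # 24: sinusoid width per bbox coordinate
assert config.dim_bbox_projection == 768 // 12       # 64: one attention head's width
```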
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/convert_bros_to_pytorch.py ADDED
@@ -0,0 +1,145 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Bros checkpoints."""
16
+
17
+ import argparse
18
+
19
+ import bros # original repo
20
+ import torch
21
+
22
+ from transformers import BrosConfig, BrosModel, BrosProcessor
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ def get_configs(model_name):
31
+ bros_config = BrosConfig.from_pretrained(model_name)
32
+ return bros_config
33
+
34
+
35
+ def remove_ignore_keys_(state_dict):
36
+ ignore_keys = [
37
+ "embeddings.bbox_sinusoid_emb.inv_freq",
38
+ ]
39
+ for k in ignore_keys:
40
+ state_dict.pop(k, None)
41
+
42
+
43
+ def rename_key(name):
44
+ if name == "embeddings.bbox_projection.weight":
45
+ name = "bbox_embeddings.bbox_projection.weight"
46
+
47
+ if name == "embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq":
48
+ name = "bbox_embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq"
49
+
50
+ if name == "embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq":
51
+ name = "bbox_embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq"
52
+
53
+ return name
54
+
55
+
56
+ def convert_state_dict(orig_state_dict, model):
57
+ # rename keys
58
+ for key in orig_state_dict.copy().keys():
59
+ val = orig_state_dict.pop(key)
60
+ orig_state_dict[rename_key(key)] = val
61
+
62
+ # remove ignore keys
63
+ remove_ignore_keys_(orig_state_dict)
64
+
65
+ return orig_state_dict
66
+
67
+
68
+ def convert_bros_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
69
+ # load original model
70
+ original_model = bros.BrosModel.from_pretrained(model_name).eval()
71
+
72
+ # load HuggingFace Model
73
+ bros_config = get_configs(model_name)
74
+ model = BrosModel.from_pretrained(model_name, config=bros_config)
75
+ model.eval()
76
+
77
+ state_dict = original_model.state_dict()
78
+ new_state_dict = convert_state_dict(state_dict, model)
79
+ model.load_state_dict(new_state_dict)
80
+
81
+ # verify results
82
+
83
+ # original BROS model require 4 points (8 float values) for each bbox, prepare bbox with [batch_size, seq_len, 8] shape
84
+ bbox = torch.tensor(
85
+ [
86
+ [
87
+ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
88
+ [0.4396, 0.6720, 0.4659, 0.6720, 0.4659, 0.6850, 0.4396, 0.6850],
89
+ [0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850],
90
+ [0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850],
91
+ [0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000],
92
+ [0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000],
93
+ [1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
94
+ ]
95
+ ]
96
+ )
97
+
98
+ processor = BrosProcessor.from_pretrained(model_name)
99
+
100
+ encoding = processor("His name is Rocco.", return_tensors="pt")
101
+ encoding["bbox"] = bbox
102
+
103
+ original_hidden_states = original_model(**encoding).last_hidden_state
104
+ # pixel_values = processor(image, return_tensors="pt").pixel_values
105
+
106
+ last_hidden_states = model(**encoding).last_hidden_state
107
+
108
+ assert torch.allclose(original_hidden_states, last_hidden_states, atol=1e-4)
109
+
110
+ if pytorch_dump_folder_path is not None:
111
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
112
+ model.save_pretrained(pytorch_dump_folder_path)
113
+ processor.save_pretrained(pytorch_dump_folder_path)
114
+
115
+ if push_to_hub:
116
+ model.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model")
117
+ processor.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model")
118
+
119
+
120
+ if __name__ == "__main__":
121
+ parser = argparse.ArgumentParser()
122
+
123
+ # Required parameters
124
+ parser.add_argument(
125
+ "--model_name",
126
+ default="jinho8345/bros-base-uncased",
127
+ required=False,
128
+ type=str,
129
+ help="Name of the original model you'd like to convert.",
130
+ )
131
+ parser.add_argument(
132
+ "--pytorch_dump_folder_path",
133
+ default=None,
134
+ required=False,
135
+ type=str,
136
+ help="Path to the output PyTorch model directory.",
137
+ )
138
+ parser.add_argument(
139
+ "--push_to_hub",
140
+ action="store_true",
141
+ help="Whether or not to push the converted model and processor to the 🤗 hub.",
142
+ )
143
+
144
+ args = parser.parse_args()
145
+ convert_bros_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
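The conversion entry point can also be called directly instead of going through `argparse`; a minimal sketch, assuming the original `bros` package is installed so `bros.BrosModel.from_pretrained` resolves (the output path is illustrative):

```python
from convert_bros_to_pytorch import convert_bros_checkpoint

convert_bros_checkpoint(
    model_name="jinho8345/bros-base-uncased",
    pytorch_dump_folder_path="./bros-base-uncased-converted",  # illustrative path
    push_to_hub=False,
)
```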
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py ADDED
@@ -0,0 +1,1318 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Bros model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndCrossAttentions,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_bros import BrosConfig
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CHECKPOINT_FOR_DOC = "jinho8345/bros-base-uncased"
48
+ _CONFIG_FOR_DOC = "BrosConfig"
49
+
50
+
51
+ from ..deprecated._archive_maps import BROS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
52
+
53
+
54
+ BROS_START_DOCSTRING = r"""
55
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
56
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
57
+ and behavior.
58
+
59
+ Parameters:
60
+ config ([`BrosConfig`]): Model configuration class with all the parameters of the model.
61
+ Initializing with a config file does not load the weights associated with the model, only the
62
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
63
+ """
64
+
65
+ BROS_INPUTS_DOCSTRING = r"""
66
+ Args:
67
+ input_ids (`torch.LongTensor` of shape `({0})`):
68
+ Indices of input sequence tokens in the vocabulary.
69
+
70
+ Indices can be obtained using [`BrosProcessor`]. See [`PreTrainedTokenizer.encode`] and
71
+ [`PreTrainedTokenizer.__call__`] for details.
72
+
73
+ [What are input IDs?](../glossary#input-ids)
74
+
75
+ bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
76
+ Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
77
+ (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
78
+ bounding box.
79
+
80
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
81
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
82
+
83
+ - 1 for tokens that are **not masked**,
84
+ - 0 for tokens that are **masked**.
85
+
86
+ [What are attention masks?](../glossary#attention-mask)
87
+
88
+ bbox_first_token_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
89
+ Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
90
+
91
+ - 1 for tokens that are **not masked**,
92
+ - 0 for tokens that are **masked**.
93
+
94
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
95
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
96
+ 1]`:
97
+
98
+ - 0 corresponds to a *sentence A* token,
99
+ - 1 corresponds to a *sentence B* token.
100
+
101
+ [What are token type IDs?](../glossary#token-type-ids)
102
+
103
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
104
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
105
+ config.max_position_embeddings - 1]`.
106
+
107
+ [What are position IDs?](../glossary#position-ids)
108
+
109
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
110
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
111
+
112
+ - 1 indicates the head is **not masked**,
113
+ - 0 indicates the head is **masked**.
114
+
115
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
116
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
117
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
118
+ model's internal embedding lookup matrix.
119
+
120
+ output_attentions (`bool`, *optional*):
121
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
122
+ tensors for more detail.
123
+
124
+ output_hidden_states (`bool`, *optional*):
125
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
126
+ more detail.
127
+
128
+ return_dict (`bool`, *optional*):
129
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
130
+ """
131
+
132
+
133
+ @dataclass
134
+ class BrosSpadeOutput(ModelOutput):
135
+ """
136
+ Base class for outputs of token classification models.
137
+
138
+ Args:
139
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
140
+ Classification loss.
141
+ initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
142
+ Classification scores for entity initial tokens (before SoftMax).
143
+ subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`):
144
+ Classification scores for entity sequence tokens (before SoftMax).
145
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
146
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
147
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
148
+
149
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
150
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
151
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
152
+ sequence_length)`.
153
+
154
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
155
+ heads.
156
+ """
157
+
158
+ loss: Optional[torch.FloatTensor] = None
159
+ initial_token_logits: torch.FloatTensor = None
160
+ subsequent_token_logits: torch.FloatTensor = None
161
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
162
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
163
+
164
+
165
+ class BrosPositionalEmbedding1D(nn.Module):
166
+ # Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15
167
+
168
+ def __init__(self, config):
169
+ super(BrosPositionalEmbedding1D, self).__init__()
170
+
171
+ self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d
172
+
173
+ inv_freq = 1 / (
174
+ 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d)
175
+ )
176
+ self.register_buffer("inv_freq", inv_freq)
177
+
178
+ def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
179
+ seq_size = pos_seq.size()
180
+ b1, b2, b3 = seq_size
181
+ sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2)
182
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
183
+ return pos_emb
184
+
185
+
186
+ class BrosPositionalEmbedding2D(nn.Module):
187
+ def __init__(self, config):
188
+ super(BrosPositionalEmbedding2D, self).__init__()
189
+
190
+ self.dim_bbox = config.dim_bbox
191
+ self.x_pos_emb = BrosPositionalEmbedding1D(config)
192
+ self.y_pos_emb = BrosPositionalEmbedding1D(config)
193
+
194
+ def forward(self, bbox: torch.Tensor) -> torch.Tensor:
195
+ stack = []
196
+ for i in range(self.dim_bbox):
197
+ if i % 2 == 0:
198
+ stack.append(self.x_pos_emb(bbox[..., i]))
199
+ else:
200
+ stack.append(self.y_pos_emb(bbox[..., i]))
201
+ bbox_pos_emb = torch.cat(stack, dim=-1)
202
+ return bbox_pos_emb
203
+
204
+
205
+ class BrosBboxEmbeddings(nn.Module):
206
+ def __init__(self, config):
207
+ super(BrosBboxEmbeddings, self).__init__()
208
+ self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
209
+ self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)
210
+
211
+ def forward(self, bbox: torch.Tensor):
212
+ bbox_t = bbox.transpose(0, 1)
213
+ bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
214
+ bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
215
+ bbox_pos_emb = self.bbox_projection(bbox_pos_emb)
216
+
217
+ return bbox_pos_emb
218
+
219
+
220
+ class BrosTextEmbeddings(nn.Module):
221
+ """Construct the embeddings from word, position and token_type embeddings."""
222
+
223
+ def __init__(self, config):
224
+ super().__init__()
225
+
226
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
227
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
228
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
229
+
230
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
231
+ # any TensorFlow checkpoint file
232
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
233
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
234
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
235
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
236
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
237
+ self.register_buffer(
238
+ "token_type_ids",
239
+ torch.zeros(
240
+ self.position_ids.size(),
241
+ dtype=torch.long,
242
+ device=self.position_ids.device,
243
+ ),
244
+ persistent=False,
245
+ )
246
+
247
+ def forward(
248
+ self,
249
+ input_ids: Optional[torch.Tensor] = None,
250
+ token_type_ids: Optional[torch.Tensor] = None,
251
+ position_ids: Optional[torch.Tensor] = None,
252
+ inputs_embeds: Optional[torch.Tensor] = None,
253
+ past_key_values_length: int = 0,
254
+ ) -> torch.Tensor:
255
+ if input_ids is not None:
256
+ input_shape = input_ids.size()
257
+ else:
258
+ input_shape = inputs_embeds.size()[:-1]
259
+
260
+ seq_length = input_shape[1]
261
+
262
+ if position_ids is None:
263
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
264
+
265
+ if token_type_ids is None:
266
+ if hasattr(self, "token_type_ids"):
267
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
268
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
269
+ token_type_ids = buffered_token_type_ids_expanded
270
+ else:
271
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
272
+
273
+ if inputs_embeds is None:
274
+ inputs_embeds = self.word_embeddings(input_ids)
275
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
276
+
277
+ embeddings = inputs_embeds + token_type_embeddings
278
+ if self.position_embedding_type == "absolute":
279
+ position_embeddings = self.position_embeddings(position_ids)
280
+ embeddings += position_embeddings
281
+ embeddings = self.LayerNorm(embeddings)
282
+ embeddings = self.dropout(embeddings)
283
+ return embeddings
284
+
285
+
286
+ class BrosSelfAttention(nn.Module):
287
+ def __init__(self, config):
288
+ super().__init__()
289
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
290
+ raise ValueError(
291
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
292
+ f"heads ({config.num_attention_heads})"
293
+ )
294
+
295
+ self.num_attention_heads = config.num_attention_heads
296
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
297
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
298
+
299
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
300
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
301
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
302
+
303
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
304
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
305
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
306
+ self.max_position_embeddings = config.max_position_embeddings
307
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
308
+
309
+ self.is_decoder = config.is_decoder
310
+
311
+ def transpose_for_scores(self, x: torch.Tensor):
312
+ new_x_shape = x.size()[:-1] + (
313
+ self.num_attention_heads,
314
+ self.attention_head_size,
315
+ )
316
+ x = x.view(*new_x_shape)
317
+ return x.permute(0, 2, 1, 3)
318
+
319
+ def forward(
320
+ self,
321
+ hidden_states: torch.Tensor,
322
+ bbox_pos_emb: torch.Tensor,
323
+ attention_mask: Optional[torch.Tensor] = None,
324
+ head_mask: Optional[torch.Tensor] = None,
325
+ encoder_hidden_states: Optional[torch.Tensor] = None,
326
+ encoder_attention_mask: Optional[torch.Tensor] = None,
327
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
328
+ output_attentions: Optional[torch.Tensor] = False,
329
+ ) -> Tuple[torch.Tensor]:
330
+ mixed_query_layer = self.query(hidden_states)
331
+
332
+ # If this is instantiated as a cross-attention module, the keys
333
+ # and values come from an encoder; the attention mask needs to be
334
+ # such that the encoder's padding tokens are not attended to.
335
+ is_cross_attention = encoder_hidden_states is not None
336
+
337
+ if is_cross_attention and past_key_value is not None:
338
+ # reuse k,v, cross_attentions
339
+ key_layer = past_key_value[0]
340
+ value_layer = past_key_value[1]
341
+ attention_mask = encoder_attention_mask
342
+ elif is_cross_attention:
343
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
344
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
345
+ attention_mask = encoder_attention_mask
346
+ elif past_key_value is not None:
347
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
348
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
349
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
350
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
351
+ else:
352
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
353
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
354
+
355
+ query_layer = self.transpose_for_scores(mixed_query_layer)
356
+
357
+ if self.is_decoder:
358
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
359
+ # Further calls to cross_attention layer can then reuse all cross-attention
360
+ # key/value_states (first "if" case)
361
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
362
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
363
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
364
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
365
+ past_key_value = (key_layer, value_layer)
366
+
367
+ # Take the dot product between "query" and "key" to get the raw attention scores.
368
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
369
+
370
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
371
+ seq_length = hidden_states.size()[1]
372
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
373
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
374
+ distance = position_ids_l - position_ids_r
375
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
376
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
377
+
378
+ if self.position_embedding_type == "relative_key":
379
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
380
+ attention_scores = attention_scores + relative_position_scores
381
+ elif self.position_embedding_type == "relative_key_query":
382
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
383
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
384
+
385
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
386
+
387
+ # bbox positional encoding
388
+ batch_size, n_head, seq_length, d_head = query_layer.shape
389
+ bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head)
390
+ bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3])
391
+ bbox_pos_scores = torch.einsum("bnid,bijd->bnij", (query_layer, bbox_pos_emb))
392
+
393
+ attention_scores = attention_scores + bbox_pos_scores
394
+
395
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
396
+ if attention_mask is not None:
397
+ # Apply the attention mask is (precomputed for all layers in BrosModel forward() function)
398
+ attention_scores = attention_scores + attention_mask
399
+
400
+ # Normalize the attention scores to probabilities.
401
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
402
+
403
+ # This is actually dropping out entire tokens to attend to, which might
404
+ # seem a bit unusual, but is taken from the original Transformer paper.
405
+ attention_probs = self.dropout(attention_probs)
406
+
407
+ # Mask heads if we want to
408
+ if head_mask is not None:
409
+ attention_probs = attention_probs * head_mask
410
+
411
+ context_layer = torch.matmul(attention_probs, value_layer)
412
+
413
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
414
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
415
+ context_layer = context_layer.view(*new_context_layer_shape)
416
+
417
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
418
+
419
+ if self.is_decoder:
420
+ outputs = outputs + (past_key_value,)
421
+ return outputs
422
+
423
+
424
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Bros
425
+ class BrosSelfOutput(nn.Module):
426
+ def __init__(self, config):
427
+ super().__init__()
428
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
429
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
430
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
431
+
432
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
433
+ hidden_states = self.dense(hidden_states)
434
+ hidden_states = self.dropout(hidden_states)
435
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
436
+ return hidden_states
437
+
438
+
439
+ class BrosAttention(nn.Module):
440
+ def __init__(self, config):
441
+ super().__init__()
442
+ self.self = BrosSelfAttention(config)
443
+ self.output = BrosSelfOutput(config)
444
+ self.pruned_heads = set()
445
+
446
+ def prune_heads(self, heads):
447
+ if len(heads) == 0:
448
+ return
449
+ heads, index = find_pruneable_heads_and_indices(
450
+ heads,
451
+ self.self.num_attention_heads,
452
+ self.self.attention_head_size,
453
+ self.pruned_heads,
454
+ )
455
+
456
+ # Prune linear layers
457
+ self.self.query = prune_linear_layer(self.self.query, index)
458
+ self.self.key = prune_linear_layer(self.self.key, index)
459
+ self.self.value = prune_linear_layer(self.self.value, index)
460
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
461
+
462
+ # Update hyper params and store pruned heads
463
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
464
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
465
+ self.pruned_heads = self.pruned_heads.union(heads)
466
+
467
+ def forward(
468
+ self,
469
+ hidden_states: torch.Tensor,
470
+ bbox_pos_emb: torch.Tensor,
471
+ attention_mask: Optional[torch.Tensor] = None,
472
+ head_mask: Optional[torch.Tensor] = None,
473
+ encoder_hidden_states: Optional[torch.Tensor] = None,
474
+ encoder_attention_mask: Optional[torch.Tensor] = None,
475
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
476
+ output_attentions: Optional[bool] = False,
477
+ ) -> Tuple[torch.Tensor]:
478
+ self_outputs = self.self(
479
+ hidden_states=hidden_states,
480
+ bbox_pos_emb=bbox_pos_emb,
481
+ attention_mask=attention_mask,
482
+ head_mask=head_mask,
483
+ encoder_hidden_states=encoder_hidden_states,
484
+ encoder_attention_mask=encoder_attention_mask,
485
+ past_key_value=past_key_value,
486
+ output_attentions=output_attentions,
487
+ )
488
+ attention_output = self.output(self_outputs[0], hidden_states)
489
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
490
+ return outputs
491
+
492
+
493
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Bros
494
+ class BrosIntermediate(nn.Module):
495
+ def __init__(self, config):
496
+ super().__init__()
497
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
498
+ if isinstance(config.hidden_act, str):
499
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
500
+ else:
501
+ self.intermediate_act_fn = config.hidden_act
502
+
503
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
504
+ hidden_states = self.dense(hidden_states)
505
+ hidden_states = self.intermediate_act_fn(hidden_states)
506
+ return hidden_states
507
+
508
+
509
+ class BrosOutput(nn.Module):
510
+ def __init__(self, config):
511
+ super().__init__()
512
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
513
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
514
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
515
+
516
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
517
+ hidden_states = self.dense(hidden_states)
518
+ hidden_states = self.dropout(hidden_states)
519
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
520
+ return hidden_states
521
+
522
+
523
+ class BrosLayer(nn.Module):
524
+ def __init__(self, config):
525
+ super().__init__()
526
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
527
+ self.seq_len_dim = 1
528
+ self.attention = BrosAttention(config)
529
+ self.is_decoder = config.is_decoder
530
+ self.add_cross_attention = config.add_cross_attention
531
+ if self.add_cross_attention:
532
+ if not self.is_decoder:
533
+ raise Exception(f"{self} should be used as a decoder model if cross attention is added")
534
+ self.crossattention = BrosAttention(config)
535
+ self.intermediate = BrosIntermediate(config)
536
+ self.output = BrosOutput(config)
537
+
538
+ def forward(
539
+ self,
540
+ hidden_states: torch.Tensor,
541
+ bbox_pos_emb: torch.Tensor,
542
+ attention_mask: Optional[torch.FloatTensor] = None,
543
+ head_mask: Optional[torch.FloatTensor] = None,
544
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
545
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
546
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
547
+ output_attentions: Optional[bool] = False,
548
+ ) -> Tuple[torch.Tensor]:
549
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
550
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
551
+ self_attention_outputs = self.attention(
552
+ hidden_states,
553
+ bbox_pos_emb=bbox_pos_emb,
554
+ attention_mask=attention_mask,
555
+ head_mask=head_mask,
556
+ output_attentions=output_attentions,
557
+ past_key_value=self_attn_past_key_value,
558
+ )
559
+ attention_output = self_attention_outputs[0]
560
+
561
+ # if decoder, the last output is tuple of self-attn cache
562
+ if self.is_decoder:
563
+ outputs = self_attention_outputs[1:-1]
564
+ present_key_value = self_attention_outputs[-1]
565
+ else:
566
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
567
+
568
+ cross_attn_present_key_value = None
569
+ if self.is_decoder and encoder_hidden_states is not None:
570
+ if not hasattr(self, "crossattention"):
571
+ raise Exception(
572
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
573
+ )
574
+
575
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
576
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
577
+ cross_attention_outputs = self.crossattention(
578
+ attention_output,
579
+ attention_mask,
580
+ head_mask,
581
+ encoder_hidden_states,
582
+ encoder_attention_mask,
583
+ cross_attn_past_key_value,
584
+ output_attentions,
585
+ )
586
+ attention_output = cross_attention_outputs[0]
587
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
588
+
589
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
590
+ cross_attn_present_key_value = cross_attention_outputs[-1]
591
+ present_key_value = present_key_value + cross_attn_present_key_value
592
+
593
+ layer_output = apply_chunking_to_forward(
594
+ self.feed_forward_chunk,
595
+ self.chunk_size_feed_forward,
596
+ self.seq_len_dim,
597
+ attention_output,
598
+ )
599
+ outputs = (layer_output,) + outputs
600
+
601
+ # if decoder, return the attn key/values as the last output
602
+ if self.is_decoder:
603
+ outputs = outputs + (present_key_value,)
604
+
605
+ return outputs
606
+
607
+ def feed_forward_chunk(self, attention_output):
608
+ intermediate_output = self.intermediate(attention_output)
609
+ layer_output = self.output(intermediate_output, attention_output)
610
+ return layer_output
611
+
612
+
613
+ class BrosEncoder(nn.Module):
614
+ def __init__(self, config):
615
+ super().__init__()
616
+ self.config = config
617
+ self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)])
618
+
619
+ def forward(
620
+ self,
621
+ hidden_states: torch.Tensor,
622
+ bbox_pos_emb: torch.Tensor,
623
+ attention_mask: Optional[torch.FloatTensor] = None,
624
+ head_mask: Optional[torch.FloatTensor] = None,
625
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
626
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
627
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
628
+ use_cache: Optional[bool] = None,
629
+ output_attentions: Optional[bool] = False,
630
+ output_hidden_states: Optional[bool] = False,
631
+ return_dict: Optional[bool] = True,
632
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
633
+ all_hidden_states = () if output_hidden_states else None
634
+ all_self_attentions = () if output_attentions else None
635
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
636
+
637
+ next_decoder_cache = () if use_cache else None
638
+ for i, layer_module in enumerate(self.layer):
639
+ if output_hidden_states:
640
+ all_hidden_states = all_hidden_states + (hidden_states,)
641
+
642
+ layer_head_mask = head_mask[i] if head_mask is not None else None
643
+ past_key_value = past_key_values[i] if past_key_values is not None else None
644
+
645
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
646
+ if use_cache:
647
+ logger.warning(
648
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
649
+ "`use_cache=False`..."
650
+ )
651
+ use_cache = False
652
+ layer_outputs = self._gradient_checkpointing_func(
653
+ layer_module.__call__,
654
+ hidden_states,
655
+ bbox_pos_emb,
656
+ attention_mask,
657
+ layer_head_mask,
658
+ encoder_hidden_states,
659
+ encoder_attention_mask,
660
+ output_attentions,
661
+ )
662
+ else:
663
+ layer_outputs = layer_module(
664
+ hidden_states=hidden_states,
665
+ bbox_pos_emb=bbox_pos_emb,
666
+ attention_mask=attention_mask,
667
+ head_mask=layer_head_mask,
668
+ encoder_hidden_states=encoder_hidden_states,
669
+ encoder_attention_mask=encoder_attention_mask,
670
+ past_key_value=past_key_value,
671
+ output_attentions=output_attentions,
672
+ )
673
+
674
+ hidden_states = layer_outputs[0]
675
+ if use_cache:
676
+ next_decoder_cache += (layer_outputs[-1],)
677
+ if output_attentions:
678
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
679
+ if self.config.add_cross_attention:
680
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
681
+
682
+ if output_hidden_states:
683
+ all_hidden_states = all_hidden_states + (hidden_states,)
684
+
685
+ if not return_dict:
686
+ return tuple(
687
+ v
688
+ for v in [
689
+ hidden_states,
690
+ next_decoder_cache,
691
+ all_hidden_states,
692
+ all_self_attentions,
693
+ all_cross_attentions,
694
+ ]
695
+ if v is not None
696
+ )
697
+ return BaseModelOutputWithPastAndCrossAttentions(
698
+ last_hidden_state=hidden_states,
699
+ past_key_values=next_decoder_cache,
700
+ hidden_states=all_hidden_states,
701
+ attentions=all_self_attentions,
702
+ cross_attentions=all_cross_attentions,
703
+ )
704
+
705
+
706
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Bros
707
+ class BrosPooler(nn.Module):
708
+ def __init__(self, config):
709
+ super().__init__()
710
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
711
+ self.activation = nn.Tanh()
712
+
713
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
714
+ # We "pool" the model by simply taking the hidden state corresponding
715
+ # to the first token.
716
+ first_token_tensor = hidden_states[:, 0]
717
+ pooled_output = self.dense(first_token_tensor)
718
+ pooled_output = self.activation(pooled_output)
719
+ return pooled_output
720
+
721
+
722
+ class BrosRelationExtractor(nn.Module):
723
+ def __init__(self, config):
724
+ super().__init__()
725
+ self.n_relations = config.n_relations
726
+ self.backbone_hidden_size = config.hidden_size
727
+ self.head_hidden_size = config.hidden_size
728
+ self.classifier_dropout_prob = config.classifier_dropout_prob
729
+
730
+ self.drop = nn.Dropout(self.classifier_dropout_prob)
731
+ self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
732
+
733
+ self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
734
+
735
+ self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size))
736
+
737
+ def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
738
+ query_layer = self.query(self.drop(query_layer))
739
+
740
+ dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1)
741
+ key_layer = torch.cat([key_layer, dummy_vec], axis=0)
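+ # key_layer is now (seq_len + 1, batch_size, hidden_size) after appending the dummy node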
742
+ key_layer = self.key(self.drop(key_layer))
743
+
744
+ query_layer = query_layer.view(
745
+ query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size
746
+ )
747
+ key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size)
748
+
749
+ relation_score = torch.matmul(
750
+ query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0)
751
+ ) # equivalent to torch.einsum("ibnd,jbnd->nbij", (query_layer, key_layer))
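+ # relation_score: (n_relations, batch_size, seq_len, seq_len + 1); the extra key position is the appended dummy node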
752
+
753
+ return relation_score
754
+
755
+
756
+ class BrosPreTrainedModel(PreTrainedModel):
757
+ """
758
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
759
+ models.
760
+ """
761
+
762
+ config_class = BrosConfig
763
+ base_model_prefix = "bros"
764
+
765
+ def _init_weights(self, module):
766
+ """Initialize the weights"""
767
+ if isinstance(module, nn.Linear):
768
+ # Slightly different from the TF version which uses truncated_normal for initialization
769
+ # cf https://github.com/pytorch/pytorch/pull/5617
770
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
771
+ if module.bias is not None:
772
+ module.bias.data.zero_()
773
+ elif isinstance(module, nn.Embedding):
774
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
775
+ if module.padding_idx is not None:
776
+ module.weight.data[module.padding_idx].zero_()
777
+ elif isinstance(module, nn.LayerNorm):
778
+ module.bias.data.zero_()
779
+ module.weight.data.fill_(1.0)
780
+
781
+
782
+ @add_start_docstrings(
783
+ "The bare Bros Model transformer outputting raw hidden-states without any specific head on top.",
784
+ BROS_START_DOCSTRING,
785
+ )
786
+ class BrosModel(BrosPreTrainedModel):
787
+ def __init__(self, config, add_pooling_layer=True):
788
+ super().__init__(config)
789
+ self.config = config
790
+
791
+ self.embeddings = BrosTextEmbeddings(config)
792
+ self.bbox_embeddings = BrosBboxEmbeddings(config)
793
+ self.encoder = BrosEncoder(config)
794
+
795
+ self.pooler = BrosPooler(config) if add_pooling_layer else None
796
+
797
+ self.init_weights()
798
+
799
+ def get_input_embeddings(self):
800
+ return self.embeddings.word_embeddings
801
+
802
+ def set_input_embeddings(self, value):
803
+ self.embeddings.word_embeddings = value
804
+
805
+ def _prune_heads(self, heads_to_prune):
806
+ """
807
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
808
+ class PreTrainedModel
809
+ """
810
+ for layer, heads in heads_to_prune.items():
811
+ self.encoder.layer[layer].attention.prune_heads(heads)
812
+
813
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
814
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
815
+ def forward(
816
+ self,
817
+ input_ids: Optional[torch.Tensor] = None,
818
+ bbox: Optional[torch.Tensor] = None,
819
+ attention_mask: Optional[torch.Tensor] = None,
820
+ token_type_ids: Optional[torch.Tensor] = None,
821
+ position_ids: Optional[torch.Tensor] = None,
822
+ head_mask: Optional[torch.Tensor] = None,
823
+ inputs_embeds: Optional[torch.Tensor] = None,
824
+ encoder_hidden_states: Optional[torch.Tensor] = None,
825
+ encoder_attention_mask: Optional[torch.Tensor] = None,
826
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
827
+ use_cache: Optional[bool] = None,
828
+ output_attentions: Optional[bool] = None,
829
+ output_hidden_states: Optional[bool] = None,
830
+ return_dict: Optional[bool] = None,
831
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
832
+ r"""
833
+ Returns:
834
+
835
+ Examples:
836
+
837
+ ```python
838
+ >>> import torch
839
+ >>> from transformers import BrosProcessor, BrosModel
840
+
841
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
842
+
843
+ >>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
844
+
845
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
846
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
847
+ >>> encoding["bbox"] = bbox
848
+
849
+ >>> outputs = model(**encoding)
850
+ >>> last_hidden_states = outputs.last_hidden_state
851
+ ```"""
852
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
853
+ output_hidden_states = (
854
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
855
+ )
856
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
857
+
858
+ if self.config.is_decoder:
859
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
860
+ else:
861
+ use_cache = False
862
+
863
+ if input_ids is not None and inputs_embeds is not None:
864
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
865
+ elif input_ids is not None:
866
+ input_shape = input_ids.size()
867
+ elif inputs_embeds is not None:
868
+ input_shape = inputs_embeds.size()[:-1]
869
+ else:
870
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
871
+
872
+ if bbox is None:
873
+ raise ValueError("You have to specify bbox")
874
+
875
+ batch_size, seq_length = input_shape
876
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
877
+
878
+ # past_key_values_length
879
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
880
+
881
+ if attention_mask is None:
882
+ attention_mask = torch.ones(input_shape, device=device)
883
+
884
+ if token_type_ids is None:
885
+ if hasattr(self.embeddings, "token_type_ids"):
886
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
887
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
888
+ token_type_ids = buffered_token_type_ids_expanded
889
+ else:
890
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
891
+
892
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
893
+ # ourselves in which case we just need to make it broadcastable to all heads.
894
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
895
+
896
+ # If a 2D or 3D attention mask is provided for the cross-attention
897
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
898
+ if self.config.is_decoder and encoder_hidden_states is not None:
899
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
900
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
901
+ if encoder_attention_mask is None:
902
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
903
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
904
+ else:
905
+ encoder_extended_attention_mask = None
906
+
907
+ # Prepare head mask if needed
908
+ # 1.0 in head_mask indicate we keep the head
909
+ # attention_probs has shape bsz x n_heads x N x N
910
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
911
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
912
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
913
+
914
+ embedding_output = self.embeddings(
915
+ input_ids=input_ids,
916
+ position_ids=position_ids,
917
+ token_type_ids=token_type_ids,
918
+ inputs_embeds=inputs_embeds,
919
+ past_key_values_length=past_key_values_length,
920
+ )
921
+
922
+ # if bbox has 2 points (4 coordinate values) per token, convert it to 4 points (8 coordinate values) per token
923
+ if bbox.shape[-1] == 4:
924
+ bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
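+ # index map (x1, y1, x2, y2) -> (x1, y1), (x2, y1), (x2, y2), (x1, y2), i.e. the four corner coordinates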
925
+ scaled_bbox = bbox * self.config.bbox_scale
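+ # bbox coordinates (typically normalized to [0, 1]) are rescaled by config.bbox_scale before the box position embeddings are computed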
926
+ bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
927
+
928
+ encoder_outputs = self.encoder(
929
+ embedding_output,
930
+ bbox_pos_emb=bbox_position_embeddings,
931
+ attention_mask=extended_attention_mask,
932
+ head_mask=head_mask,
933
+ encoder_hidden_states=encoder_hidden_states,
934
+ encoder_attention_mask=encoder_extended_attention_mask,
935
+ past_key_values=past_key_values,
936
+ use_cache=use_cache,
937
+ output_attentions=output_attentions,
938
+ output_hidden_states=output_hidden_states,
939
+ return_dict=return_dict,
940
+ )
941
+ sequence_output = encoder_outputs[0]
942
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
943
+
944
+ if not return_dict:
945
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
946
+
947
+ return BaseModelOutputWithPoolingAndCrossAttentions(
948
+ last_hidden_state=sequence_output,
949
+ pooler_output=pooled_output,
950
+ past_key_values=encoder_outputs.past_key_values,
951
+ hidden_states=encoder_outputs.hidden_states,
952
+ attentions=encoder_outputs.attentions,
953
+ cross_attentions=encoder_outputs.cross_attentions,
954
+ )
955
+
956
+
957
+ @add_start_docstrings(
958
+ """
959
+ Bros Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
960
+ Named-Entity-Recognition (NER) tasks.
961
+ """,
962
+ BROS_START_DOCSTRING,
963
+ )
964
+ class BrosForTokenClassification(BrosPreTrainedModel):
965
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
966
+
967
+ def __init__(self, config):
968
+ super().__init__(config)
969
+ self.num_labels = config.num_labels
970
+
971
+ self.bros = BrosModel(config)
972
+ classifier_dropout = (
973
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
974
+ )
975
+ self.dropout = nn.Dropout(classifier_dropout)
976
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
977
+
978
+ self.init_weights()
979
+
980
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
981
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
982
+ def forward(
983
+ self,
984
+ input_ids: Optional[torch.Tensor] = None,
985
+ bbox: Optional[torch.Tensor] = None,
986
+ attention_mask: Optional[torch.Tensor] = None,
987
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
988
+ token_type_ids: Optional[torch.Tensor] = None,
989
+ position_ids: Optional[torch.Tensor] = None,
990
+ head_mask: Optional[torch.Tensor] = None,
991
+ inputs_embeds: Optional[torch.Tensor] = None,
992
+ labels: Optional[torch.Tensor] = None,
993
+ output_attentions: Optional[bool] = None,
994
+ output_hidden_states: Optional[bool] = None,
995
+ return_dict: Optional[bool] = None,
996
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
997
+ r"""
998
+
999
+ Returns:
1000
+
1001
+ Examples:
1002
+
1003
+ ```python
1004
+ >>> import torch
1005
+ >>> from transformers import BrosProcessor, BrosForTokenClassification
1006
+
1007
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1008
+
1009
+ >>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1010
+
1011
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1012
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1013
+ >>> encoding["bbox"] = bbox
1014
+
1015
+ >>> outputs = model(**encoding)
1016
+ ```"""
1017
+
1018
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1019
+
1020
+ outputs = self.bros(
1021
+ input_ids,
1022
+ bbox=bbox,
1023
+ attention_mask=attention_mask,
1024
+ token_type_ids=token_type_ids,
1025
+ position_ids=position_ids,
1026
+ head_mask=head_mask,
1027
+ inputs_embeds=inputs_embeds,
1028
+ output_attentions=output_attentions,
1029
+ output_hidden_states=output_hidden_states,
1030
+ return_dict=return_dict,
1031
+ )
1032
+
1033
+ sequence_output = outputs[0]
1034
+
1035
+ sequence_output = self.dropout(sequence_output)
1036
+ logits = self.classifier(sequence_output)
1037
+
1038
+ loss = None
1039
+ if labels is not None:
1040
+ loss_fct = CrossEntropyLoss()
1041
+ if bbox_first_token_mask is not None:
1042
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
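+ # compute the loss only on the first token of each bounding box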
1043
+ loss = loss_fct(
1044
+ logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]
1045
+ )
1046
+ else:
1047
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1048
+
1049
+ if not return_dict:
1050
+ output = (logits,) + outputs[2:]
1051
+ return ((loss,) + output) if loss is not None else output
1052
+
1053
+ return TokenClassifierOutput(
1054
+ loss=loss,
1055
+ logits=logits,
1056
+ hidden_states=outputs.hidden_states,
1057
+ attentions=outputs.attentions,
1058
+ )
1059
+
1060
+
1061
+ @add_start_docstrings(
1062
+ """
1063
+ Bros Model with a token classification head on top (an initial_token_classifier and a subsequent_token_classifier on top of the
1064
+ hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to
1065
+ predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent
1066
+ tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors
1067
+ since it predicts the next token of an entity from the current token rather than relying on the serialization order of the input.
1068
+ """,
1069
+ BROS_START_DOCSTRING,
1070
+ )
1071
+ class BrosSpadeEEForTokenClassification(BrosPreTrainedModel):
1072
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1073
+
1074
+ def __init__(self, config):
1075
+ super().__init__(config)
1076
+ self.config = config
1077
+ self.num_labels = config.num_labels
1078
+ self.n_relations = config.n_relations
1079
+ self.backbone_hidden_size = config.hidden_size
1080
+
1081
+ self.bros = BrosModel(config)
1082
+ classifier_dropout = (
1083
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
1084
+ )
1085
+
1086
+ # Initial token classification for Entity Extraction (NER)
1087
+ self.initial_token_classifier = nn.Sequential(
1088
+ nn.Dropout(classifier_dropout),
1089
+ nn.Linear(config.hidden_size, config.hidden_size),
1090
+ nn.Dropout(classifier_dropout),
1091
+ nn.Linear(config.hidden_size, config.num_labels),
1092
+ )
1093
+
1094
+ # Subsequent token classification for Entity Extraction (NER)
1095
+ self.subsequent_token_classifier = BrosRelationExtractor(config)
1096
+
1097
+ self.init_weights()
1098
+
1099
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1100
+ @replace_return_docstrings(output_type=BrosSpadeOutput, config_class=_CONFIG_FOR_DOC)
1101
+ def forward(
1102
+ self,
1103
+ input_ids: Optional[torch.Tensor] = None,
1104
+ bbox: Optional[torch.Tensor] = None,
1105
+ attention_mask: Optional[torch.Tensor] = None,
1106
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
1107
+ token_type_ids: Optional[torch.Tensor] = None,
1108
+ position_ids: Optional[torch.Tensor] = None,
1109
+ head_mask: Optional[torch.Tensor] = None,
1110
+ inputs_embeds: Optional[torch.Tensor] = None,
1111
+ initial_token_labels: Optional[torch.Tensor] = None,
1112
+ subsequent_token_labels: Optional[torch.Tensor] = None,
1113
+ output_attentions: Optional[bool] = None,
1114
+ output_hidden_states: Optional[bool] = None,
1115
+ return_dict: Optional[bool] = None,
1116
+ ) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]:
1117
+ r"""
1118
+ Returns:
1119
+
1120
+ Examples:
1121
+
1122
+ ```python
1123
+ >>> import torch
1124
+ >>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification
1125
+
1126
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1127
+
1128
+ >>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1129
+
1130
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1131
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1132
+ >>> encoding["bbox"] = bbox
1133
+
1134
+ >>> outputs = model(**encoding)
1135
+ ```"""
1136
+
1137
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1138
+
1139
+ outputs = self.bros(
1140
+ input_ids=input_ids,
1141
+ bbox=bbox,
1142
+ attention_mask=attention_mask,
1143
+ token_type_ids=token_type_ids,
1144
+ position_ids=position_ids,
1145
+ head_mask=head_mask,
1146
+ inputs_embeds=inputs_embeds,
1147
+ output_attentions=output_attentions,
1148
+ output_hidden_states=output_hidden_states,
1149
+ return_dict=return_dict,
1150
+ )
1151
+
1152
+ last_hidden_states = outputs[0]
1153
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
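+ # transpose to (seq_len, batch_size, hidden_size); BrosRelationExtractor expects sequence-first inputs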
1154
+ initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous()
1155
+ subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0)
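+ # subsequent_token_logits: (batch_size, seq_len, seq_len + 1); the extra last column comes from the dummy node and serves as the "no subsequent token" target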
1156
+
1157
+ # make subsequent token (sequence token classification) mask
1158
+ inv_attention_mask = 1 - attention_mask
1159
+ batch_size, max_seq_length = inv_attention_mask.shape
1160
+ device = inv_attention_mask.device
1161
+ invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool()
1162
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
1163
+ invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min
1164
+ )
1165
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
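+ # diagonal mask: a token can never be predicted as its own subsequent token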
1166
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
1167
+ self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min
1168
+ )
1169
+ subsequent_token_mask = attention_mask.view(-1).bool()
1170
+
1171
+ loss = None
1172
+ if initial_token_labels is not None and subsequent_token_labels is not None:
1173
+ loss_fct = CrossEntropyLoss()
1174
+
1175
+ # get initial token loss
1176
+ initial_token_labels = initial_token_labels.view(-1)
1177
+ if bbox_first_token_mask is not None:
1178
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
1179
+ initial_token_loss = loss_fct(
1180
+ initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask],
1181
+ initial_token_labels[bbox_first_token_mask],
1182
+ )
1183
+ else:
1184
+ initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)
1185
+
1186
+ subsequent_token_labels = subsequent_token_labels.view(-1)
1187
+ subsequent_token_loss = loss_fct(
1188
+ subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask],
1189
+ subsequent_token_labels[subsequent_token_mask],
1190
+ )
1191
+
1192
+ loss = initial_token_loss + subsequent_token_loss
1193
+
1194
+ if not return_dict:
1195
+ output = (initial_token_logits, subsequent_token_logits) + outputs[2:]
1196
+ return ((loss,) + output) if loss is not None else output
1197
+
1198
+ return BrosSpadeOutput(
1199
+ loss=loss,
1200
+ initial_token_logits=initial_token_logits,
1201
+ subsequent_token_logits=subsequent_token_logits,
1202
+ hidden_states=outputs.hidden_states,
1203
+ attentions=outputs.attentions,
1204
+ )
1205
+
1206
+
1207
+ @add_start_docstrings(
1208
+ """
1209
+ Bros Model with a token classification head on top (an entity_linker layer on top of the hidden-states output) e.g.
1210
+ for Entity-Linking. The entity_linker is used to predict inter-entity links (links from one entity to another entity).
1211
+ """,
1212
+ BROS_START_DOCSTRING,
1213
+ )
1214
+ class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
1215
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1216
+
1217
+ def __init__(self, config):
1218
+ super().__init__(config)
1219
+ self.config = config
1220
+ self.num_labels = config.num_labels
1221
+ self.n_relations = config.n_relations
1222
+ self.backbone_hidden_size = config.hidden_size
1223
+
1224
+ self.bros = BrosModel(config)
1225
+ (config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob)
1226
+
1227
+ self.entity_linker = BrosRelationExtractor(config)
1228
+
1229
+ self.init_weights()
1230
+
1231
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1232
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1233
+ def forward(
1234
+ self,
1235
+ input_ids: Optional[torch.Tensor] = None,
1236
+ bbox: Optional[torch.Tensor] = None,
1237
+ attention_mask: Optional[torch.Tensor] = None,
1238
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
1239
+ token_type_ids: Optional[torch.Tensor] = None,
1240
+ position_ids: Optional[torch.Tensor] = None,
1241
+ head_mask: Optional[torch.Tensor] = None,
1242
+ inputs_embeds: Optional[torch.Tensor] = None,
1243
+ labels: Optional[torch.Tensor] = None,
1244
+ output_attentions: Optional[bool] = None,
1245
+ output_hidden_states: Optional[bool] = None,
1246
+ return_dict: Optional[bool] = None,
1247
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1248
+ r"""
1249
+ Returns:
1250
+
1251
+ Examples:
1252
+
1253
+ ```python
1254
+ >>> import torch
1255
+ >>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification
1256
+
1257
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1258
+
1259
+ >>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1260
+
1261
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1262
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1263
+ >>> encoding["bbox"] = bbox
1264
+
1265
+ >>> outputs = model(**encoding)
1266
+ ```"""
1267
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1268
+
1269
+ outputs = self.bros(
1270
+ input_ids=input_ids,
1271
+ bbox=bbox,
1272
+ attention_mask=attention_mask,
1273
+ token_type_ids=token_type_ids,
1274
+ position_ids=position_ids,
1275
+ head_mask=head_mask,
1276
+ inputs_embeds=inputs_embeds,
1277
+ output_attentions=output_attentions,
1278
+ output_hidden_states=output_hidden_states,
1279
+ return_dict=return_dict,
1280
+ )
1281
+
1282
+ last_hidden_states = outputs[0]
1283
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
1284
+
1285
+ logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0)
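+ # logits: (batch_size, seq_len, seq_len + 1) pairwise linking scores; the extra column is the dummy node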
1286
+
1287
+ loss = None
1288
+ if labels is not None:
1289
+ loss_fct = CrossEntropyLoss()
1290
+
1291
+ batch_size, max_seq_length = attention_mask.shape
1292
+ device = attention_mask.device
1293
+
1294
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
1295
+
1296
+ mask = bbox_first_token_mask.view(-1)
1297
+ bbox_first_token_mask = torch.cat(
1298
+ [
1299
+ ~bbox_first_token_mask,
1300
+ torch.zeros([batch_size, 1], dtype=torch.bool).to(device),
1301
+ ],
1302
+ axis=1,
1303
+ )
1304
+ logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min)
1305
+ logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)
1306
+
1307
+ loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask])
1308
+
1309
+ if not return_dict:
1310
+ output = (logits,) + outputs[2:]
1311
+ return ((loss,) + output) if loss is not None else output
1312
+
1313
+ return TokenClassifierOutput(
1314
+ loss=loss,
1315
+ logits=logits,
1316
+ hidden_states=outputs.hidden_states,
1317
+ attentions=outputs.attentions,
1318
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py ADDED
@@ -0,0 +1,109 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Bros.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class BrosProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a Bros processor which wraps a BERT tokenizer.
29
+
30
+ [`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
31
+ [`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
32
+
33
+ Args:
34
+ tokenizer (`BertTokenizerFast`, *optional*):
35
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
36
+ """
37
+
38
+ attributes = ["tokenizer"]
39
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
40
+
41
+ def __init__(self, tokenizer=None, **kwargs):
42
+ if tokenizer is None:
43
+ raise ValueError("You need to specify a `tokenizer`.")
44
+
45
+ super().__init__(tokenizer)
46
+
47
+ def __call__(
48
+ self,
49
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
50
+ add_special_tokens: bool = True,
51
+ padding: Union[bool, str, PaddingStrategy] = False,
52
+ truncation: Union[bool, str, TruncationStrategy] = None,
53
+ max_length: Optional[int] = None,
54
+ stride: int = 0,
55
+ pad_to_multiple_of: Optional[int] = None,
56
+ return_token_type_ids: Optional[bool] = None,
57
+ return_attention_mask: Optional[bool] = None,
58
+ return_overflowing_tokens: bool = False,
59
+ return_special_tokens_mask: bool = False,
60
+ return_offsets_mapping: bool = False,
61
+ return_length: bool = False,
62
+ verbose: bool = True,
63
+ return_tensors: Optional[Union[str, TensorType]] = None,
64
+ **kwargs,
65
+ ) -> BatchEncoding:
66
+ """
67
+ This method uses [`BertTokenizerFast.__call__`] to prepare text for the model.
68
+
69
+ Please refer to the docstring of that method for more information.
70
+ """
71
+ encoding = self.tokenizer(
72
+ text=text,
73
+ add_special_tokens=add_special_tokens,
74
+ padding=padding,
75
+ truncation=truncation,
76
+ max_length=max_length,
77
+ stride=stride,
78
+ pad_to_multiple_of=pad_to_multiple_of,
79
+ return_token_type_ids=return_token_type_ids,
80
+ return_attention_mask=return_attention_mask,
81
+ return_overflowing_tokens=return_overflowing_tokens,
82
+ return_special_tokens_mask=return_special_tokens_mask,
83
+ return_offsets_mapping=return_offsets_mapping,
84
+ return_length=return_length,
85
+ verbose=verbose,
86
+ return_tensors=return_tensors,
87
+ **kwargs,
88
+ )
89
+
90
+ return encoding
91
+
92
+ def batch_decode(self, *args, **kwargs):
93
+ """
94
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
95
+ refer to the docstring of this method for more information.
96
+ """
97
+ return self.tokenizer.batch_decode(*args, **kwargs)
98
+
99
+ def decode(self, *args, **kwargs):
100
+ """
101
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
102
+ the docstring of this method for more information.
103
+ """
104
+ return self.tokenizer.decode(*args, **kwargs)
105
+
106
+ @property
107
+ def model_input_names(self):
108
+ tokenizer_input_names = self.tokenizer.model_input_names
109
+ return list(dict.fromkeys(tokenizer_input_names))
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"]
31
+ _import_structure["image_processing_deformable_detr"] = ["DeformableDetrImageProcessor"]
32
+
33
+ try:
34
+ if not is_torch_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["modeling_deformable_detr"] = [
40
+ "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
41
+ "DeformableDetrForObjectDetection",
42
+ "DeformableDetrModel",
43
+ "DeformableDetrPreTrainedModel",
44
+ ]
45
+
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig
49
+
50
+ try:
51
+ if not is_vision_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor
57
+ from .image_processing_deformable_detr import DeformableDetrImageProcessor
58
+
59
+ try:
60
+ if not is_torch_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ from .modeling_deformable_detr import (
66
+ DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
67
+ DeformableDetrForObjectDetection,
68
+ DeformableDetrModel,
69
+ DeformableDetrPreTrainedModel,
70
+ )
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc ADDED
Binary file (6.84 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc ADDED
Binary file (51.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc ADDED
Binary file (89.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py ADDED
@@ -0,0 +1,277 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Deformable DETR model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto import CONFIG_MAPPING
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class DeformableDetrConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate
31
+ a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a
32
+ configuration with the defaults will yield a similar configuration to that of the Deformable DETR
33
+ [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ use_timm_backbone (`bool`, *optional*, defaults to `True`):
40
+ Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
41
+ API.
42
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
43
+ The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
44
+ case it will default to `ResNetConfig()`.
45
+ num_channels (`int`, *optional*, defaults to 3):
46
+ The number of input channels.
47
+ num_queries (`int`, *optional*, defaults to 300):
48
+ Number of object queries, i.e. detection slots. This is the maximal number of objects
49
+ [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use
50
+ `two_stage_num_proposals` instead.
51
+ d_model (`int`, *optional*, defaults to 256):
52
+ Dimension of the layers.
53
+ encoder_layers (`int`, *optional*, defaults to 6):
54
+ Number of encoder layers.
55
+ decoder_layers (`int`, *optional*, defaults to 6):
56
+ Number of decoder layers.
57
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
60
+ Number of attention heads for each attention layer in the Transformer decoder.
61
+ decoder_ffn_dim (`int`, *optional*, defaults to 1024):
62
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
63
+ encoder_ffn_dim (`int`, *optional*, defaults to 1024):
64
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
65
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
66
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
67
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
68
+ dropout (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
70
+ attention_dropout (`float`, *optional*, defaults to 0.0):
71
+ The dropout ratio for the attention probabilities.
72
+ activation_dropout (`float`, *optional*, defaults to 0.0):
73
+ The dropout ratio for activations inside the fully connected layer.
74
+ init_std (`float`, *optional*, defaults to 0.02):
75
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
76
+ init_xavier_std (`float`, *optional*, defaults to 1):
77
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
78
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
79
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
80
+ for more details.
81
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
82
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
83
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
84
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
85
+ backbone (`str`, *optional*, defaults to `"resnet50"`):
86
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
87
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
88
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
89
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
90
+ Whether to use pretrained weights for the backbone.
91
+ backbone_kwargs (`dict`, *optional*):
92
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
93
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
94
+ dilation (`bool`, *optional*, defaults to `False`):
95
+ Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
96
+ `use_timm_backbone` = `True`.
97
+ class_cost (`float`, *optional*, defaults to 1):
98
+ Relative weight of the classification error in the Hungarian matching cost.
99
+ bbox_cost (`float`, *optional*, defaults to 5):
100
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
101
+ giou_cost (`float`, *optional*, defaults to 2):
102
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
103
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
104
+ Relative weight of the Focal loss in the panoptic segmentation loss.
105
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
106
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
107
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
108
+ Relative weight of the L1 bounding box loss in the object detection loss.
109
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
110
+ Relative weight of the generalized IoU loss in the object detection loss.
111
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
112
+ Relative classification weight of the 'no-object' class in the object detection loss.
113
+ num_feature_levels (`int`, *optional*, defaults to 4):
114
+ The number of input feature levels.
115
+ encoder_n_points (`int`, *optional*, defaults to 4):
116
+ The number of sampled keys in each feature level for each attention head in the encoder.
117
+ decoder_n_points (`int`, *optional*, defaults to 4):
118
+ The number of sampled keys in each feature level for each attention head in the decoder.
119
+ two_stage (`bool`, *optional*, defaults to `False`):
120
+ Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
121
+ Deformable DETR, which are further fed into the decoder for iterative bounding box refinement.
122
+ two_stage_num_proposals (`int`, *optional*, defaults to 300):
123
+ The number of region proposals to be generated, in case `two_stage` is set to `True`.
124
+ with_box_refine (`bool`, *optional*, defaults to `False`):
125
+ Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
126
+ based on the predictions from the previous layer.
127
+ focal_alpha (`float`, *optional*, defaults to 0.25):
128
+ Alpha parameter in the focal loss.
129
+ disable_custom_kernels (`bool`, *optional*, defaults to `False`):
130
+ Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
131
+ kernels are not supported by PyTorch ONNX export.
132
+
133
+ Examples:
134
+
135
+ ```python
136
+ >>> from transformers import DeformableDetrConfig, DeformableDetrModel
137
+
138
+ >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration
139
+ >>> configuration = DeformableDetrConfig()
140
+
141
+ >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
142
+ >>> model = DeformableDetrModel(configuration)
143
+
144
+ >>> # Accessing the model configuration
145
+ >>> configuration = model.config
146
+ ```"""
147
+
148
+ model_type = "deformable_detr"
149
+ attribute_map = {
150
+ "hidden_size": "d_model",
151
+ "num_attention_heads": "encoder_attention_heads",
152
+ }
153
+
154
+ def __init__(
155
+ self,
156
+ use_timm_backbone=True,
157
+ backbone_config=None,
158
+ num_channels=3,
159
+ num_queries=300,
160
+ max_position_embeddings=1024,
161
+ encoder_layers=6,
162
+ encoder_ffn_dim=1024,
163
+ encoder_attention_heads=8,
164
+ decoder_layers=6,
165
+ decoder_ffn_dim=1024,
166
+ decoder_attention_heads=8,
167
+ encoder_layerdrop=0.0,
168
+ is_encoder_decoder=True,
169
+ activation_function="relu",
170
+ d_model=256,
171
+ dropout=0.1,
172
+ attention_dropout=0.0,
173
+ activation_dropout=0.0,
174
+ init_std=0.02,
175
+ init_xavier_std=1.0,
176
+ return_intermediate=True,
177
+ auxiliary_loss=False,
178
+ position_embedding_type="sine",
179
+ backbone="resnet50",
180
+ use_pretrained_backbone=True,
181
+ backbone_kwargs=None,
182
+ dilation=False,
183
+ num_feature_levels=4,
184
+ encoder_n_points=4,
185
+ decoder_n_points=4,
186
+ two_stage=False,
187
+ two_stage_num_proposals=300,
188
+ with_box_refine=False,
189
+ class_cost=1,
190
+ bbox_cost=5,
191
+ giou_cost=2,
192
+ mask_loss_coefficient=1,
193
+ dice_loss_coefficient=1,
194
+ bbox_loss_coefficient=5,
195
+ giou_loss_coefficient=2,
196
+ eos_coefficient=0.1,
197
+ focal_alpha=0.25,
198
+ disable_custom_kernels=False,
199
+ **kwargs,
200
+ ):
201
+ if not use_timm_backbone and use_pretrained_backbone:
202
+ raise ValueError(
203
+ "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
204
+ )
205
+
206
+ if backbone_config is not None and backbone is not None:
207
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
208
+
209
+ if backbone_config is not None and use_timm_backbone:
210
+ raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
211
+
212
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
213
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
214
+
215
+ if not use_timm_backbone:
216
+ if backbone_config is None:
217
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
218
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
219
+ elif isinstance(backbone_config, dict):
220
+ backbone_model_type = backbone_config.get("model_type")
221
+ config_class = CONFIG_MAPPING[backbone_model_type]
222
+ backbone_config = config_class.from_dict(backbone_config)
223
+ self.use_timm_backbone = use_timm_backbone
224
+ self.backbone_config = backbone_config
225
+ self.num_channels = num_channels
226
+ self.num_queries = num_queries
227
+ self.max_position_embeddings = max_position_embeddings
228
+ self.d_model = d_model
229
+ self.encoder_ffn_dim = encoder_ffn_dim
230
+ self.encoder_layers = encoder_layers
231
+ self.encoder_attention_heads = encoder_attention_heads
232
+ self.decoder_ffn_dim = decoder_ffn_dim
233
+ self.decoder_layers = decoder_layers
234
+ self.decoder_attention_heads = decoder_attention_heads
235
+ self.dropout = dropout
236
+ self.attention_dropout = attention_dropout
237
+ self.activation_dropout = activation_dropout
238
+ self.activation_function = activation_function
239
+ self.init_std = init_std
240
+ self.init_xavier_std = init_xavier_std
241
+ self.encoder_layerdrop = encoder_layerdrop
242
+ self.auxiliary_loss = auxiliary_loss
243
+ self.position_embedding_type = position_embedding_type
244
+ self.backbone = backbone
245
+ self.use_pretrained_backbone = use_pretrained_backbone
246
+ self.backbone_kwargs = backbone_kwargs
247
+ self.dilation = dilation
248
+ # deformable attributes
249
+ self.num_feature_levels = num_feature_levels
250
+ self.encoder_n_points = encoder_n_points
251
+ self.decoder_n_points = decoder_n_points
252
+ self.two_stage = two_stage
253
+ self.two_stage_num_proposals = two_stage_num_proposals
254
+ self.with_box_refine = with_box_refine
255
+ if two_stage is True and with_box_refine is False:
256
+ raise ValueError("If two_stage is True, with_box_refine must be True.")
257
+ # Hungarian matcher
258
+ self.class_cost = class_cost
259
+ self.bbox_cost = bbox_cost
260
+ self.giou_cost = giou_cost
261
+ # Loss coefficients
262
+ self.mask_loss_coefficient = mask_loss_coefficient
263
+ self.dice_loss_coefficient = dice_loss_coefficient
264
+ self.bbox_loss_coefficient = bbox_loss_coefficient
265
+ self.giou_loss_coefficient = giou_loss_coefficient
266
+ self.eos_coefficient = eos_coefficient
267
+ self.focal_alpha = focal_alpha
268
+ self.disable_custom_kernels = disable_custom_kernels
269
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
270
+
271
+ @property
272
+ def num_attention_heads(self) -> int:
273
+ return self.encoder_attention_heads
274
+
275
+ @property
276
+ def hidden_size(self) -> int:
277
+ return self.d_model
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py ADDED
@@ -0,0 +1,237 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Deformable DETR checkpoints."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import cached_download, hf_hub_url
25
+ from PIL import Image
26
+
27
+ from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DeformableDetrImageProcessor
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def rename_key(orig_key):
36
+ if "backbone.0.body" in orig_key:
37
+ orig_key = orig_key.replace("backbone.0.body", "backbone.conv_encoder.model")
38
+ if "transformer" in orig_key:
39
+ orig_key = orig_key.replace("transformer.", "")
40
+ if "norm1" in orig_key:
41
+ if "encoder" in orig_key:
42
+ orig_key = orig_key.replace("norm1", "self_attn_layer_norm")
43
+ else:
44
+ orig_key = orig_key.replace("norm1", "encoder_attn_layer_norm")
45
+ if "norm2" in orig_key:
46
+ if "encoder" in orig_key:
47
+ orig_key = orig_key.replace("norm2", "final_layer_norm")
48
+ else:
49
+ orig_key = orig_key.replace("norm2", "self_attn_layer_norm")
50
+ if "norm3" in orig_key:
51
+ orig_key = orig_key.replace("norm3", "final_layer_norm")
52
+ if "linear1" in orig_key:
53
+ orig_key = orig_key.replace("linear1", "fc1")
54
+ if "linear2" in orig_key:
55
+ orig_key = orig_key.replace("linear2", "fc2")
56
+ if "query_embed" in orig_key:
57
+ orig_key = orig_key.replace("query_embed", "query_position_embeddings")
58
+ if "cross_attn" in orig_key:
59
+ orig_key = orig_key.replace("cross_attn", "encoder_attn")
60
+
61
+ return orig_key
62
+
63
+
64
+ def read_in_q_k_v(state_dict):
65
+ # transformer decoder self-attention layers
66
+ for i in range(6):
67
+ # read in weights + bias of input projection layer of self-attention
68
+ in_proj_weight = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_weight")
69
+ in_proj_bias = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_bias")
70
+ # next, add query, keys and values (in that order) to the state dict
71
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
72
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
73
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
74
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
75
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
76
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
77
+
78
+
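
The slicing above assumes a fused `in_proj` tensor of shape `(3 * 256, 256)` (hidden size 256). A quick sanity-check sketch of that split, using random weights rather than a real checkpoint:

    import torch

    # Hypothetical fused projection; the real one comes from the checkpoint's state dict.
    in_proj_weight = torch.randn(3 * 256, 256)
    q, k, v = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]
    assert q.shape == k.shape == v.shape == (256, 256)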
79
+ # We will verify our results on an image of cute cats
80
+ def prepare_img():
81
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
82
+ im = Image.open(requests.get(url, stream=True).raw)
83
+
84
+ return im
85
+
86
+
87
+ @torch.no_grad()
88
+ def convert_deformable_detr_checkpoint(
89
+ checkpoint_path,
90
+ single_scale,
91
+ dilation,
92
+ with_box_refine,
93
+ two_stage,
94
+ pytorch_dump_folder_path,
95
+ push_to_hub,
96
+ ):
97
+ """
98
+ Copy/paste/tweak model's weights to our Deformable DETR structure.
99
+ """
100
+
101
+ # load default config
102
+ config = DeformableDetrConfig()
103
+ # set config attributes
104
+ if single_scale:
105
+ config.num_feature_levels = 1
106
+ config.dilation = dilation
107
+ config.with_box_refine = with_box_refine
108
+ config.two_stage = two_stage
109
+ # set labels
110
+ config.num_labels = 91
111
+ repo_id = "huggingface/label-files"
112
+ filename = "coco-detection-id2label.json"
113
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
114
+ id2label = {int(k): v for k, v in id2label.items()}
115
+ config.id2label = id2label
116
+ config.label2id = {v: k for k, v in id2label.items()}
117
+
118
+ # load image processor
119
+ image_processor = DeformableDetrImageProcessor(format="coco_detection")
120
+
121
+ # prepare image
122
+ img = prepare_img()
123
+ encoding = image_processor(images=img, return_tensors="pt")
124
+ pixel_values = encoding["pixel_values"]
125
+
126
+ logger.info("Converting model...")
127
+
128
+ # load original state dict
129
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
130
+ # rename keys
131
+ for key in state_dict.copy().keys():
132
+ val = state_dict.pop(key)
133
+ state_dict[rename_key(key)] = val
134
+ # query, key and value matrices need special treatment
135
+ read_in_q_k_v(state_dict)
136
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
137
+ prefix = "model."
138
+ for key in state_dict.copy().keys():
139
+ if not key.startswith("class_embed") and not key.startswith("bbox_embed"):
140
+ val = state_dict.pop(key)
141
+ state_dict[prefix + key] = val
142
+ # finally, create HuggingFace model and load state dict
143
+ model = DeformableDetrForObjectDetection(config)
144
+ model.load_state_dict(state_dict)
145
+ model.eval()
146
+
147
+ device = "cuda" if torch.cuda.is_available() else "cpu"
148
+ model.to(device)
149
+ # verify our conversion
150
+ outputs = model(pixel_values.to(device))
151
+
152
+ expected_logits = torch.tensor(
153
+ [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]]
154
+ )
155
+ expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]])
156
+
157
+ if single_scale:
158
+ expected_logits = torch.tensor(
159
+ [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]]
160
+ )
161
+ expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]])
162
+
163
+ if single_scale and dilation:
164
+ expected_logits = torch.tensor(
165
+ [[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]]
166
+ )
167
+ expected_boxes = torch.tensor([[0.7665, 0.4130, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]])
168
+
169
+ if with_box_refine:
170
+ expected_logits = torch.tensor(
171
+ [[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]]
172
+ )
173
+ expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]])
174
+
175
+ if with_box_refine and two_stage:
176
+ expected_logits = torch.tensor(
177
+ [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]]
178
+ )
179
+ expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]])
180
+
181
+ print("Logits:", outputs.logits[0, :3, :3])
182
+
183
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
184
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
185
+
186
+ print("Everything ok!")
187
+
188
+ # Save model and image processor
189
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
190
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
191
+ model.save_pretrained(pytorch_dump_folder_path)
192
+ image_processor.save_pretrained(pytorch_dump_folder_path)
193
+
194
+ # Push to hub
195
+ if push_to_hub:
196
+ model_name = "deformable-detr"
197
+ model_name += "-single-scale" if single_scale else ""
198
+ model_name += "-dc5" if dilation else ""
199
+ model_name += "-with-box-refine" if with_box_refine else ""
200
+ model_name += "-two-stage" if two_stage else ""
201
+ print("Pushing model to hub...")
202
+ model.push_to_hub(repo_path_or_name=model_name, organization="nielsr", commit_message="Add model")
203
+
204
+
205
+ if __name__ == "__main__":
206
+ parser = argparse.ArgumentParser()
207
+
208
+ parser.add_argument(
209
+ "--checkpoint_path",
210
+ type=str,
211
+ default="/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth",
212
+ help="Path to Pytorch checkpoint (.pth file) you'd like to convert.",
213
+ )
214
+ parser.add_argument("--single_scale", action="store_true", help="Whether to set config.num_features_levels = 1.")
215
+ parser.add_argument("--dilation", action="store_true", help="Whether to set config.dilation=True.")
216
+ parser.add_argument("--with_box_refine", action="store_true", help="Whether to set config.with_box_refine=True.")
217
+ parser.add_argument("--two_stage", action="store_true", help="Whether to set config.two_stage=True.")
218
+ parser.add_argument(
219
+ "--pytorch_dump_folder_path",
220
+ default=None,
221
+ type=str,
222
+ required=True,
223
+ help="Path to the folder to output PyTorch model.",
224
+ )
225
+ parser.add_argument(
226
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
227
+ )
228
+ args = parser.parse_args()
229
+ convert_deformable_detr_checkpoint(
230
+ args.checkpoint_path,
231
+ args.single_scale,
232
+ args.dilation,
233
+ args.with_box_refine,
234
+ args.two_stage,
235
+ args.pytorch_dump_folder_path,
236
+ args.push_to_hub,
237
+ )
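
A hedged usage sketch of the converter above, called directly from Python instead of through argparse. Both paths are placeholders, not real files; the flag combination mirrors the checks made in the function.

    convert_deformable_detr_checkpoint(
        checkpoint_path="r50_deformable_detr-checkpoint.pth",     # placeholder checkpoint path
        single_scale=False,
        dilation=False,
        with_box_refine=True,
        two_stage=True,
        pytorch_dump_folder_path="./deformable-detr-converted",   # placeholder output dir
        push_to_hub=False,
    )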
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py ADDED
@@ -0,0 +1,43 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for Deformable DETR."""
16
+
17
+ import warnings
18
+
19
+ from ...image_transforms import rgb_to_id as _rgb_to_id
20
+ from ...utils import logging
21
+ from .image_processing_deformable_detr import DeformableDetrImageProcessor
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ def rgb_to_id(x):
28
+ warnings.warn(
29
+ "rgb_to_id has moved and will not be importable from this module from v5. "
30
+ "Please import from transformers.image_transforms instead.",
31
+ FutureWarning,
32
+ )
33
+ return _rgb_to_id(x)
34
+
35
+
36
+ class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
37
+ def __init__(self, *args, **kwargs) -> None:
38
+ warnings.warn(
39
+ "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
40
+ " Please use DeformableDetrImageProcessor instead.",
41
+ FutureWarning,
42
+ )
43
+ super().__init__(*args, **kwargs)
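
A small sketch of the deprecation path defined above: constructing the feature extractor emits a `FutureWarning` but otherwise behaves as the image processor it subclasses (the `format` argument is assumed to be forwarded unchanged).

    import warnings

    from transformers import DeformableDetrFeatureExtractor, DeformableDetrImageProcessor

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = DeformableDetrFeatureExtractor(format="coco_detection")

    assert isinstance(extractor, DeformableDetrImageProcessor)
    assert any(issubclass(w.category, FutureWarning) for w in caught)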
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py ADDED
@@ -0,0 +1,1553 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Deformable DETR."""
16
+
17
+ import io
18
+ import pathlib
19
+ from collections import defaultdict
20
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...feature_extraction_utils import BatchFeature
25
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
26
+ from ...image_transforms import (
27
+ PaddingMode,
28
+ center_to_corners_format,
29
+ corners_to_center_format,
30
+ id_to_rgb,
31
+ pad,
32
+ rescale,
33
+ resize,
34
+ rgb_to_id,
35
+ to_channel_dimension_format,
36
+ )
37
+ from ...image_utils import (
38
+ IMAGENET_DEFAULT_MEAN,
39
+ IMAGENET_DEFAULT_STD,
40
+ AnnotationFormat,
41
+ AnnotationType,
42
+ ChannelDimension,
43
+ ImageInput,
44
+ PILImageResampling,
45
+ get_image_size,
46
+ infer_channel_dimension_format,
47
+ is_scaled_image,
48
+ make_list_of_images,
49
+ to_numpy_array,
50
+ valid_images,
51
+ validate_annotations,
52
+ validate_kwargs,
53
+ validate_preprocess_arguments,
54
+ )
55
+ from ...utils import (
56
+ TensorType,
57
+ is_flax_available,
58
+ is_jax_tensor,
59
+ is_scipy_available,
60
+ is_tf_available,
61
+ is_tf_tensor,
62
+ is_torch_available,
63
+ is_torch_tensor,
64
+ is_vision_available,
65
+ logging,
66
+ )
67
+
68
+
69
+ if is_torch_available():
70
+ import torch
71
+ from torch import nn
72
+
73
+
74
+ if is_vision_available():
75
+ import PIL
76
+
77
+ if is_scipy_available():
78
+ import scipy.special
79
+ import scipy.stats
80
+
81
+
82
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
83
+
84
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
85
+
86
+
87
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
88
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
89
+ """
90
+ Computes the output image size given the input image size and the desired output size.
91
+
92
+ Args:
93
+ image_size (`Tuple[int, int]`):
94
+ The input image size.
95
+ size (`int`):
96
+ The desired output size.
97
+ max_size (`int`, *optional*):
98
+ The maximum allowed output size.
99
+ """
100
+ height, width = image_size
101
+ if max_size is not None:
102
+ min_original_size = float(min((height, width)))
103
+ max_original_size = float(max((height, width)))
104
+ if max_original_size / min_original_size * size > max_size:
105
+ size = int(round(max_size * min_original_size / max_original_size))
106
+
107
+ if (height <= width and height == size) or (width <= height and width == size):
108
+ return height, width
109
+
110
+ if width < height:
111
+ ow = size
112
+ oh = int(size * height / width)
113
+ else:
114
+ oh = size
115
+ ow = int(size * width / height)
116
+ return (oh, ow)
117
+
118
+
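
A worked example of the helper above with assumed input dimensions: a 480x640 image resized with `size=800` and `max_size=1333` keeps its aspect ratio.

    # (height, width) = (480, 640): the shorter edge becomes 800, the longer edge
    # scales to int(800 * 640 / 480) = 1066, and 1066 <= max_size = 1333.
    assert get_size_with_aspect_ratio((480, 640), size=800, max_size=1333) == (800, 1066)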
119
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
120
+ def get_resize_output_image_size(
121
+ input_image: np.ndarray,
122
+ size: Union[int, Tuple[int, int], List[int]],
123
+ max_size: Optional[int] = None,
124
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
125
+ ) -> Tuple[int, int]:
126
+ """
127
+ Computes the output image size given the input image size and the desired output size. If the desired output size
128
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
129
+ image size is computed by keeping the aspect ratio of the input image size.
130
+
131
+ Args:
132
+ input_image (`np.ndarray`):
133
+ The image to resize.
134
+ size (`int` or `Tuple[int, int]` or `List[int]`):
135
+ The desired output size.
136
+ max_size (`int`, *optional*):
137
+ The maximum allowed output size.
138
+ input_data_format (`ChannelDimension` or `str`, *optional*):
139
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
140
+ """
141
+ image_size = get_image_size(input_image, input_data_format)
142
+ if isinstance(size, (list, tuple)):
143
+ return size
144
+
145
+ return get_size_with_aspect_ratio(image_size, size, max_size)
146
+
147
+
148
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
149
+ def get_numpy_to_framework_fn(arr) -> Callable:
150
+ """
151
+ Returns a function that converts a numpy array to the framework of the input array.
152
+
153
+ Args:
154
+ arr (`np.ndarray`): The array to convert.
155
+ """
156
+ if isinstance(arr, np.ndarray):
157
+ return np.array
158
+ if is_tf_available() and is_tf_tensor(arr):
159
+ import tensorflow as tf
160
+
161
+ return tf.convert_to_tensor
162
+ if is_torch_available() and is_torch_tensor(arr):
163
+ import torch
164
+
165
+ return torch.tensor
166
+ if is_flax_available() and is_jax_tensor(arr):
167
+ import jax.numpy as jnp
168
+
169
+ return jnp.array
170
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
171
+
172
+
173
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
174
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
175
+ """
176
+ Squeezes an array, but only if the axis specified has dim 1.
177
+ """
178
+ if axis is None:
179
+ return arr.squeeze()
180
+
181
+ try:
182
+ return arr.squeeze(axis=axis)
183
+ except ValueError:
184
+ return arr
185
+
186
+
187
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
188
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
189
+ image_height, image_width = image_size
190
+ norm_annotation = {}
191
+ for key, value in annotation.items():
192
+ if key == "boxes":
193
+ boxes = value
194
+ boxes = corners_to_center_format(boxes)
195
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
196
+ norm_annotation[key] = boxes
197
+ else:
198
+ norm_annotation[key] = value
199
+ return norm_annotation
200
+
201
+
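
A hedged example of the conversion above, assuming a single box in a 200x400 (height x width) image and that the function is in scope:

    import numpy as np

    # xyxy box [50, 40, 150, 120] -> relative cxcywh [0.25, 0.4, 0.25, 0.4]
    ann = {"boxes": np.array([[50.0, 40.0, 150.0, 120.0]])}
    print(normalize_annotation(ann, image_size=(200, 400))["boxes"])  # [[0.25 0.4 0.25 0.4]]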
202
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
203
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
204
+ """
205
+ Return the maximum value across all indices of an iterable of values.
206
+ """
207
+ return [max(values_i) for values_i in zip(*values)]
208
+
209
+
210
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
211
+ def get_max_height_width(
212
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
213
+ ) -> List[int]:
214
+ """
215
+ Get the maximum height and width across all images in a batch.
216
+ """
217
+ if input_data_format is None:
218
+ input_data_format = infer_channel_dimension_format(images[0])
219
+
220
+ if input_data_format == ChannelDimension.FIRST:
221
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
222
+ elif input_data_format == ChannelDimension.LAST:
223
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
224
+ else:
225
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
226
+ return (max_height, max_width)
227
+
228
+
229
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
230
+ def make_pixel_mask(
231
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
232
+ ) -> np.ndarray:
233
+ """
234
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
235
+
236
+ Args:
237
+ image (`np.ndarray`):
238
+ Image to make the pixel mask for.
239
+ output_size (`Tuple[int, int]`):
240
+ Output size of the mask.
241
+ """
242
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
243
+ mask = np.zeros(output_size, dtype=np.int64)
244
+ mask[:input_height, :input_width] = 1
245
+ return mask
246
+
247
+
248
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
249
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
250
+ """
251
+ Convert a COCO polygon annotation to a mask.
252
+
253
+ Args:
254
+ segmentations (`List[List[float]]`):
255
+ List of polygons, each polygon represented by a list of x-y coordinates.
256
+ height (`int`):
257
+ Height of the mask.
258
+ width (`int`):
259
+ Width of the mask.
260
+ """
261
+ try:
262
+ from pycocotools import mask as coco_mask
263
+ except ImportError:
264
+ raise ImportError("Pycocotools is not installed in your environment.")
265
+
266
+ masks = []
267
+ for polygons in segmentations:
268
+ rles = coco_mask.frPyObjects(polygons, height, width)
269
+ mask = coco_mask.decode(rles)
270
+ if len(mask.shape) < 3:
271
+ mask = mask[..., None]
272
+ mask = np.asarray(mask, dtype=np.uint8)
273
+ mask = np.any(mask, axis=2)
274
+ masks.append(mask)
275
+ if masks:
276
+ masks = np.stack(masks, axis=0)
277
+ else:
278
+ masks = np.zeros((0, height, width), dtype=np.uint8)
279
+
280
+ return masks
281
+
282
+
283
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr
284
+ def prepare_coco_detection_annotation(
285
+ image,
286
+ target,
287
+ return_segmentation_masks: bool = False,
288
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
289
+ ):
290
+ """
291
+ Convert the target in COCO format into the format expected by DeformableDetr.
292
+ """
293
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
294
+
295
+ image_id = target["image_id"]
296
+ image_id = np.asarray([image_id], dtype=np.int64)
297
+
298
+ # Get all COCO annotations for the given image.
299
+ annotations = target["annotations"]
300
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
301
+
302
+ classes = [obj["category_id"] for obj in annotations]
303
+ classes = np.asarray(classes, dtype=np.int64)
304
+
305
+ # for conversion to coco api
306
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
307
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
308
+
309
+ boxes = [obj["bbox"] for obj in annotations]
310
+ # guard against no boxes via resizing
311
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
312
+ boxes[:, 2:] += boxes[:, :2]
313
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
314
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
315
+
316
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
317
+
318
+ new_target = {}
319
+ new_target["image_id"] = image_id
320
+ new_target["class_labels"] = classes[keep]
321
+ new_target["boxes"] = boxes[keep]
322
+ new_target["area"] = area[keep]
323
+ new_target["iscrowd"] = iscrowd[keep]
324
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
325
+
326
+ if annotations and "keypoints" in annotations[0]:
327
+ keypoints = [obj["keypoints"] for obj in annotations]
328
+ # Converting the filtered keypoints list to a numpy array
329
+ keypoints = np.asarray(keypoints, dtype=np.float32)
330
+ # Apply the keep mask here to filter the relevant annotations
331
+ keypoints = keypoints[keep]
332
+ num_keypoints = keypoints.shape[0]
333
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
334
+ new_target["keypoints"] = keypoints
335
+
336
+ if return_segmentation_masks:
337
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
338
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
339
+ new_target["masks"] = masks[keep]
340
+
341
+ return new_target
342
+
343
+
344
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
345
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
346
+ """
347
+ Compute the bounding boxes around the provided panoptic segmentation masks.
348
+
349
+ Args:
350
+ masks: masks in format `[number_masks, height, width]` where N is the number of masks
351
+
352
+ Returns:
353
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
354
+ """
355
+ if masks.size == 0:
356
+ return np.zeros((0, 4))
357
+
358
+ h, w = masks.shape[-2:]
359
+ y = np.arange(0, h, dtype=np.float32)
360
+ x = np.arange(0, w, dtype=np.float32)
361
+ # see https://github.com/pytorch/pytorch/issues/50276
362
+ y, x = np.meshgrid(y, x, indexing="ij")
363
+
364
+ x_mask = masks * np.expand_dims(x, axis=0)
365
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
366
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
367
+ x_min = x.filled(fill_value=1e8)
368
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
369
+
370
+ y_mask = masks * np.expand_dims(y, axis=0)
371
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
372
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
373
+ y_min = y.filled(fill_value=1e8)
374
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
375
+
376
+ return np.stack([x_min, y_min, x_max, y_max], 1)
377
+
378
+
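
A small check of the function above on an assumed toy mask: foreground pixels covering rows 1-2 and columns 1-3 of a 4x4 grid produce the xyxy box [1, 1, 3, 2].

    import numpy as np

    mask = np.zeros((1, 4, 4))
    mask[0, 1:3, 1:4] = 1
    print(masks_to_boxes(mask))  # [[1. 1. 3. 2.]]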
379
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr
380
+ def prepare_coco_panoptic_annotation(
381
+ image: np.ndarray,
382
+ target: Dict,
383
+ masks_path: Union[str, pathlib.Path],
384
+ return_masks: bool = True,
385
+ input_data_format: Union[ChannelDimension, str] = None,
386
+ ) -> Dict:
387
+ """
388
+ Prepare a coco panoptic annotation for DeformableDetr.
389
+ """
390
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
391
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
392
+
393
+ new_target = {}
394
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
395
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
396
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
397
+
398
+ if "segments_info" in target:
399
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
400
+ masks = rgb_to_id(masks)
401
+
402
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
403
+ masks = masks == ids[:, None, None]
404
+ masks = masks.astype(np.uint8)
405
+ if return_masks:
406
+ new_target["masks"] = masks
407
+ new_target["boxes"] = masks_to_boxes(masks)
408
+ new_target["class_labels"] = np.array(
409
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
410
+ )
411
+ new_target["iscrowd"] = np.asarray(
412
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
413
+ )
414
+ new_target["area"] = np.asarray(
415
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
416
+ )
417
+
418
+ return new_target
419
+
420
+
421
+ # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
422
+ def get_segmentation_image(
423
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
424
+ ):
425
+ h, w = input_size
426
+ final_h, final_w = target_size
427
+
428
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
429
+
430
+ if m_id.shape[-1] == 0:
431
+ # We didn't detect any mask :(
432
+ m_id = np.zeros((h, w), dtype=np.int64)
433
+ else:
434
+ m_id = m_id.argmax(-1).reshape(h, w)
435
+
436
+ if deduplicate:
437
+ # Merge the masks corresponding to the same stuff class
438
+ for equiv in stuff_equiv_classes.values():
439
+ for eq_id in equiv:
440
+ m_id[m_id == eq_id] = equiv[0]
441
+
442
+ seg_img = id_to_rgb(m_id)
443
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
444
+ return seg_img
445
+
446
+
447
+ # Copied from transformers.models.detr.image_processing_detr.get_mask_area
448
+ def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
449
+ final_h, final_w = target_size
450
+ np_seg_img = seg_img.astype(np.uint8)
451
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
452
+ m_id = rgb_to_id(np_seg_img)
453
+ area = [(m_id == i).sum() for i in range(n_classes)]
454
+ return area
455
+
456
+
457
+ # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
458
+ def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
459
+ probs = scipy.special.softmax(logits, axis=-1)
460
+ labels = probs.argmax(-1, keepdims=True)
461
+ scores = np.take_along_axis(probs, labels, axis=-1)
462
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
463
+ return scores, labels
464
+
465
+
466
+ # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample
467
+ def post_process_panoptic_sample(
468
+ out_logits: np.ndarray,
469
+ masks: np.ndarray,
470
+ boxes: np.ndarray,
471
+ processed_size: Tuple[int, int],
472
+ target_size: Tuple[int, int],
473
+ is_thing_map: Dict,
474
+ threshold=0.85,
475
+ ) -> Dict:
476
+ """
477
+ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.
478
+
479
+ Args:
480
+ out_logits (`torch.Tensor`):
481
+ The logits for this sample.
482
+ masks (`torch.Tensor`):
483
+ The predicted segmentation masks for this sample.
484
+ boxes (`torch.Tensor`):
485
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
486
+ width, height)` and values between `[0, 1]`, relative to the size of the image (disregarding padding).
487
+ processed_size (`Tuple[int, int]`):
488
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
489
+ after data augmentation but before batching.
490
+ target_size (`Tuple[int, int]`):
491
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
492
+ prediction.
493
+ is_thing_map (`Dict`):
494
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
495
+ threshold (`float`, *optional*, defaults to 0.85):
496
+ The threshold used to binarize the segmentation masks.
497
+ """
498
+ # we filter empty queries and detection below threshold
499
+ scores, labels = score_labels_from_class_probabilities(out_logits)
500
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
501
+
502
+ cur_scores = scores[keep]
503
+ cur_classes = labels[keep]
504
+ cur_boxes = center_to_corners_format(boxes[keep])
505
+
506
+ if len(cur_boxes) != len(cur_classes):
507
+ raise ValueError("Not as many boxes as there are classes")
508
+
509
+ cur_masks = masks[keep]
510
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
511
+ cur_masks = safe_squeeze(cur_masks, 1)
512
+ b, h, w = cur_masks.shape
513
+
514
+ # It may be that we have several predicted masks for the same stuff class.
515
+ # In the following, we track the list of masks ids for each stuff class (they are merged later on)
516
+ cur_masks = cur_masks.reshape(b, -1)
517
+ stuff_equiv_classes = defaultdict(list)
518
+ for k, label in enumerate(cur_classes):
519
+ if not is_thing_map[label]:
520
+ stuff_equiv_classes[label].append(k)
521
+
522
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
523
+ area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
524
+
525
+ # We filter out any mask that is too small
526
+ if cur_classes.size > 0:
527
+ # We now filter empty masks as long as we find some
528
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
529
+ while filtered_small.any():
530
+ cur_masks = cur_masks[~filtered_small]
531
+ cur_scores = cur_scores[~filtered_small]
532
+ cur_classes = cur_classes[~filtered_small]
533
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
534
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
535
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
536
+ else:
537
+ cur_classes = np.ones((1, 1), dtype=np.int64)
538
+
539
+ segments_info = [
540
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
541
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
542
+ ]
543
+ del cur_classes
544
+
545
+ with io.BytesIO() as out:
546
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
547
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
548
+
549
+ return predictions
550
+
551
+
552
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
553
+ def resize_annotation(
554
+ annotation: Dict[str, Any],
555
+ orig_size: Tuple[int, int],
556
+ target_size: Tuple[int, int],
557
+ threshold: float = 0.5,
558
+ resample: PILImageResampling = PILImageResampling.NEAREST,
559
+ ):
560
+ """
561
+ Resizes an annotation to a target size.
562
+
563
+ Args:
564
+ annotation (`Dict[str, Any]`):
565
+ The annotation dictionary.
566
+ orig_size (`Tuple[int, int]`):
567
+ The original size of the input image.
568
+ target_size (`Tuple[int, int]`):
569
+ The target size of the image, as returned by the preprocessing `resize` step.
570
+ threshold (`float`, *optional*, defaults to 0.5):
571
+ The threshold used to binarize the segmentation masks.
572
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
573
+ The resampling filter to use when resizing the masks.
574
+ """
575
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
576
+ ratio_height, ratio_width = ratios
577
+
578
+ new_annotation = {}
579
+ new_annotation["size"] = target_size
580
+
581
+ for key, value in annotation.items():
582
+ if key == "boxes":
583
+ boxes = value
584
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
585
+ new_annotation["boxes"] = scaled_boxes
586
+ elif key == "area":
587
+ area = value
588
+ scaled_area = area * (ratio_width * ratio_height)
589
+ new_annotation["area"] = scaled_area
590
+ elif key == "masks":
591
+ masks = value[:, None]
592
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
593
+ masks = masks.astype(np.float32)
594
+ masks = masks[:, 0] > threshold
595
+ new_annotation["masks"] = masks
596
+ elif key == "size":
597
+ new_annotation["size"] = target_size
598
+ else:
599
+ new_annotation[key] = value
600
+
601
+ return new_annotation
602
+
603
+
604
+ # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
605
+ def binary_mask_to_rle(mask):
606
+ """
607
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
608
+
609
+ Args:
610
+ mask (`torch.Tensor` or `numpy.array`):
611
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
612
+ segment_id or class_id.
613
+ Returns:
614
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
615
+ format.
616
+ """
617
+ if is_torch_tensor(mask):
618
+ mask = mask.numpy()
619
+
620
+ pixels = mask.flatten()
621
+ pixels = np.concatenate([[0], pixels, [0]])
622
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
623
+ runs[1::2] -= runs[::2]
624
+ return list(runs)
625
+
626
+
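
A worked example of the RLE encoding above on an assumed 2x3 mask: the foreground sits at flattened 1-indexed positions 2-3 and 5, so the alternating (start, length) pairs are [2, 2, 5, 1].

    import numpy as np

    mask = np.array([[0, 1, 1],
                     [0, 1, 0]])
    print(binary_mask_to_rle(mask))  # [2, 2, 5, 1]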
627
+ # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
628
+ def convert_segmentation_to_rle(segmentation):
629
+ """
630
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
631
+
632
+ Args:
633
+ segmentation (`torch.Tensor` or `numpy.array`):
634
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
635
+ Returns:
636
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
637
+ """
638
+ segment_ids = torch.unique(segmentation)
639
+
640
+ run_length_encodings = []
641
+ for idx in segment_ids:
642
+ mask = torch.where(segmentation == idx, 1, 0)
643
+ rle = binary_mask_to_rle(mask)
644
+ run_length_encodings.append(rle)
645
+
646
+ return run_length_encodings
647
+
648
+
649
+ # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
650
+ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
651
+ """
652
+ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
653
+ `labels`.
654
+
655
+ Args:
656
+ masks (`torch.Tensor`):
657
+ A tensor of shape `(num_queries, height, width)`.
658
+ scores (`torch.Tensor`):
659
+ A tensor of shape `(num_queries)`.
660
+ labels (`torch.Tensor`):
661
+ A tensor of shape `(num_queries)`.
662
+ object_mask_threshold (`float`):
663
+ A number between 0 and 1 used to binarize the masks.
664
+ Raises:
665
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
666
+ Returns:
667
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region
668
+ < `object_mask_threshold`.
669
+ """
670
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
671
+ raise ValueError("mask, scores and labels must have the same shape!")
672
+
673
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
674
+
675
+ return masks[to_keep], scores[to_keep], labels[to_keep]
676
+
677
+
678
+ # Copied from transformers.models.detr.image_processing_detr.check_segment_validity
679
+ def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
680
+ # Get the mask associated with the k class
681
+ mask_k = mask_labels == k
682
+ mask_k_area = mask_k.sum()
683
+
684
+ # Compute the area of all the stuff in query k
685
+ original_area = (mask_probs[k] >= mask_threshold).sum()
686
+ mask_exists = mask_k_area > 0 and original_area > 0
687
+
688
+ # Eliminate disconnected tiny segments
689
+ if mask_exists:
690
+ area_ratio = mask_k_area / original_area
691
+ if not area_ratio.item() > overlap_mask_area_threshold:
692
+ mask_exists = False
693
+
694
+ return mask_exists, mask_k
695
+
696
+
697
+ # Copied from transformers.models.detr.image_processing_detr.compute_segments
698
+ def compute_segments(
699
+ mask_probs,
700
+ pred_scores,
701
+ pred_labels,
702
+ mask_threshold: float = 0.5,
703
+ overlap_mask_area_threshold: float = 0.8,
704
+ label_ids_to_fuse: Optional[Set[int]] = None,
705
+ target_size: Tuple[int, int] = None,
706
+ ):
707
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
708
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
709
+
710
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
711
+ segments: List[Dict] = []
712
+
713
+ if target_size is not None:
714
+ mask_probs = nn.functional.interpolate(
715
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
716
+ )[0]
717
+
718
+ current_segment_id = 0
719
+
720
+ # Weigh each mask by its prediction score
721
+ mask_probs *= pred_scores.view(-1, 1, 1)
722
+ mask_labels = mask_probs.argmax(0) # [height, width]
723
+
724
+ # Keep track of instances of each class
725
+ stuff_memory_list: Dict[str, int] = {}
726
+ for k in range(pred_labels.shape[0]):
727
+ pred_class = pred_labels[k].item()
728
+ should_fuse = pred_class in label_ids_to_fuse
729
+
730
+ # Check if mask exists and large enough to be a segment
731
+ mask_exists, mask_k = check_segment_validity(
732
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
733
+ )
734
+
735
+ if mask_exists:
736
+ if pred_class in stuff_memory_list:
737
+ current_segment_id = stuff_memory_list[pred_class]
738
+ else:
739
+ current_segment_id += 1
740
+
741
+ # Add current object segment to final segmentation map
742
+ segmentation[mask_k] = current_segment_id
743
+ segment_score = round(pred_scores[k].item(), 6)
744
+ segments.append(
745
+ {
746
+ "id": current_segment_id,
747
+ "label_id": pred_class,
748
+ "was_fused": should_fuse,
749
+ "score": segment_score,
750
+ }
751
+ )
752
+ if should_fuse:
753
+ stuff_memory_list[pred_class] = current_segment_id
754
+
755
+ return segmentation, segments
756
+
757
+
758
+ class DeformableDetrImageProcessor(BaseImageProcessor):
759
+ r"""
760
+ Constructs a Deformable DETR image processor.
761
+
762
+ Args:
763
+ format (`str`, *optional*, defaults to `"coco_detection"`):
764
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
765
+ do_resize (`bool`, *optional*, defaults to `True`):
766
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
767
+ overridden by the `do_resize` parameter in the `preprocess` method.
768
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
769
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
770
+ the `preprocess` method.
771
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
772
+ Resampling filter to use if resizing the image.
773
+ do_rescale (`bool`, *optional*, defaults to `True`):
774
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
775
+ `do_rescale` parameter in the `preprocess` method.
776
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
777
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
778
+ `preprocess` method.
779
+ do_normalize:
780
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
781
+ `preprocess` method.
782
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
783
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
784
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
785
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
786
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
787
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
788
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
789
+ Controls whether to convert the annotations to the format expected by the DETR model. Converts the
790
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
791
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
792
+ do_pad (`bool`, *optional*, defaults to `True`):
793
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
794
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
795
+ Padding will be applied to the bottom and right of the image with zeros.
796
+ """
797
+
798
+ model_input_names = ["pixel_values", "pixel_mask"]
799
+
800
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
801
+ def __init__(
802
+ self,
803
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
804
+ do_resize: bool = True,
805
+ size: Dict[str, int] = None,
806
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
807
+ do_rescale: bool = True,
808
+ rescale_factor: Union[int, float] = 1 / 255,
809
+ do_normalize: bool = True,
810
+ image_mean: Union[float, List[float]] = None,
811
+ image_std: Union[float, List[float]] = None,
812
+ do_convert_annotations: Optional[bool] = None,
813
+ do_pad: bool = True,
814
+ **kwargs,
815
+ ) -> None:
816
+ if "pad_and_return_pixel_mask" in kwargs:
817
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
818
+
819
+ if "max_size" in kwargs:
820
+ logger.warning_once(
821
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
822
+ "Please specify in `size['longest_edge']` instead.",
823
+ )
824
+ max_size = kwargs.pop("max_size")
825
+ else:
826
+ max_size = None if size is None else 1333
827
+
828
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
829
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
830
+
831
+ # Backwards compatibility
832
+ if do_convert_annotations is None:
833
+ do_convert_annotations = do_normalize
834
+
835
+ super().__init__(**kwargs)
836
+ self.format = format
837
+ self.do_resize = do_resize
838
+ self.size = size
839
+ self.resample = resample
840
+ self.do_rescale = do_rescale
841
+ self.rescale_factor = rescale_factor
842
+ self.do_normalize = do_normalize
843
+ self.do_convert_annotations = do_convert_annotations
844
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
845
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
846
+ self.do_pad = do_pad
847
+ self._valid_processor_keys = [
848
+ "images",
849
+ "annotations",
850
+ "return_segmentation_masks",
851
+ "masks_path",
852
+ "do_resize",
853
+ "size",
854
+ "resample",
855
+ "do_rescale",
856
+ "rescale_factor",
857
+ "do_normalize",
858
+ "do_convert_annotations",
859
+ "image_mean",
860
+ "image_std",
861
+ "do_pad",
862
+ "format",
863
+ "return_tensors",
864
+ "data_format",
865
+ "input_data_format",
866
+ ]
867
+
868
+ @classmethod
869
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr
870
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
871
+ """
872
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
873
+ created using from_dict and kwargs e.g. `DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600,
874
+ max_size=800)`
875
+ """
876
+ image_processor_dict = image_processor_dict.copy()
877
+ if "max_size" in kwargs:
878
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
879
+ if "pad_and_return_pixel_mask" in kwargs:
880
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
881
+ return super().from_dict(image_processor_dict, **kwargs)
882
+
883
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr
884
+ def prepare_annotation(
885
+ self,
886
+ image: np.ndarray,
887
+ target: Dict,
888
+ format: Optional[AnnotationFormat] = None,
889
+ return_segmentation_masks: bool = None,
890
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
891
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
892
+ ) -> Dict:
893
+ """
894
+ Prepare an annotation for feeding into DeformableDetr model.
895
+ """
896
+ format = format if format is not None else self.format
897
+
898
+ if format == AnnotationFormat.COCO_DETECTION:
899
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
900
+ target = prepare_coco_detection_annotation(
901
+ image, target, return_segmentation_masks, input_data_format=input_data_format
902
+ )
903
+ elif format == AnnotationFormat.COCO_PANOPTIC:
904
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
905
+ target = prepare_coco_panoptic_annotation(
906
+ image,
907
+ target,
908
+ masks_path=masks_path,
909
+ return_masks=return_segmentation_masks,
910
+ input_data_format=input_data_format,
911
+ )
912
+ else:
913
+ raise ValueError(f"Format {format} is not supported.")
914
+ return target
915
+
916
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
917
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
918
+ logger.warning_once(
919
+ "The `prepare` method is deprecated and will be removed in v4.33. "
920
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
921
+ "does not return the image anymore.",
922
+ )
923
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
924
+ return image, target
925
+
926
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
927
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
928
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
929
+ return convert_coco_poly_to_mask(*args, **kwargs)
930
+
931
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
932
+ def prepare_coco_detection(self, *args, **kwargs):
933
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
934
+ return prepare_coco_detection_annotation(*args, **kwargs)
935
+
936
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
937
+ def prepare_coco_panoptic(self, *args, **kwargs):
938
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
939
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
940
+
941
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
942
+ def resize(
943
+ self,
944
+ image: np.ndarray,
945
+ size: Dict[str, int],
946
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
947
+ data_format: Optional[ChannelDimension] = None,
948
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
949
+ **kwargs,
950
+ ) -> np.ndarray:
951
+ """
952
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
953
+ int, smaller edge of the image will be matched to this number.
954
+
955
+ Args:
956
+ image (`np.ndarray`):
957
+ Image to resize.
958
+ size (`Dict[str, int]`):
959
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
960
+ `height` and `width`.
961
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
962
+ Resampling filter to use if resizing the image.
963
+ data_format (`str` or `ChannelDimension`, *optional*):
964
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
965
+ image is used.
966
+ input_data_format (`ChannelDimension` or `str`, *optional*):
967
+ The channel dimension format of the input image. If not provided, it will be inferred.
968
+ """
969
+ if "max_size" in kwargs:
970
+ logger.warning_once(
971
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
972
+ "Please specify in `size['longest_edge']` instead.",
973
+ )
974
+ max_size = kwargs.pop("max_size")
975
+ else:
976
+ max_size = None
977
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
978
+ if "shortest_edge" in size and "longest_edge" in size:
979
+ size = get_resize_output_image_size(
980
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
981
+ )
982
+ elif "height" in size and "width" in size:
983
+ size = (size["height"], size["width"])
984
+ else:
985
+ raise ValueError(
986
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
987
+ f" {size.keys()}."
988
+ )
989
+ image = resize(
990
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
991
+ )
992
+ return image
993
+
994
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
995
+ def resize_annotation(
996
+ self,
997
+ annotation,
998
+ orig_size,
999
+ size,
1000
+ resample: PILImageResampling = PILImageResampling.NEAREST,
1001
+ ) -> Dict:
1002
+ """
1003
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
1004
+ to this number.
1005
+ """
1006
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
1007
+
1008
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
1009
+ def rescale(
1010
+ self,
1011
+ image: np.ndarray,
1012
+ rescale_factor: float,
1013
+ data_format: Optional[Union[str, ChannelDimension]] = None,
1014
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1015
+ ) -> np.ndarray:
1016
+ """
1017
+ Rescale the image by the given factor. image = image * rescale_factor.
1018
+
1019
+ Args:
1020
+ image (`np.ndarray`):
1021
+ Image to rescale.
1022
+ rescale_factor (`float`):
1023
+ The value to use for rescaling.
1024
+ data_format (`str` or `ChannelDimension`, *optional*):
1025
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
1026
+ image is used. Can be one of:
1027
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1028
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1029
+ input_data_format (`str` or `ChannelDimension`, *optional*):
1030
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
1031
+ one of:
1032
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1033
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1034
+ """
1035
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
1036
+
1037
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
1038
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
1039
+ """
1040
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
1041
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
1042
+ """
1043
+ return normalize_annotation(annotation, image_size=image_size)
1044
+
1045
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
1046
+ def _update_annotation_for_padded_image(
1047
+ self,
1048
+ annotation: Dict,
1049
+ input_image_size: Tuple[int, int],
1050
+ output_image_size: Tuple[int, int],
1051
+ padding,
1052
+ update_bboxes,
1053
+ ) -> Dict:
1054
+ """
1055
+ Update the annotation for a padded image.
1056
+ """
1057
+ new_annotation = {}
1058
+ new_annotation["size"] = output_image_size
1059
+
1060
+ for key, value in annotation.items():
1061
+ if key == "masks":
1062
+ masks = value
1063
+ masks = pad(
1064
+ masks,
1065
+ padding,
1066
+ mode=PaddingMode.CONSTANT,
1067
+ constant_values=0,
1068
+ input_data_format=ChannelDimension.FIRST,
1069
+ )
1070
+ masks = safe_squeeze(masks, 1)
1071
+ new_annotation["masks"] = masks
1072
+ elif key == "boxes" and update_bboxes:
1073
+ boxes = value
1074
+ boxes *= np.asarray(
1075
+ [
1076
+ input_image_size[1] / output_image_size[1],
1077
+ input_image_size[0] / output_image_size[0],
1078
+ input_image_size[1] / output_image_size[1],
1079
+ input_image_size[0] / output_image_size[0],
1080
+ ]
1081
+ )
1082
+ new_annotation["boxes"] = boxes
1083
+ elif key == "size":
1084
+ new_annotation["size"] = output_image_size
1085
+ else:
1086
+ new_annotation[key] = value
1087
+ return new_annotation
1088
+
1089
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
1090
+ def _pad_image(
1091
+ self,
1092
+ image: np.ndarray,
1093
+ output_size: Tuple[int, int],
1094
+ annotation: Optional[Dict[str, Any]] = None,
1095
+ constant_values: Union[float, Iterable[float]] = 0,
1096
+ data_format: Optional[ChannelDimension] = None,
1097
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1098
+ update_bboxes: bool = True,
1099
+ ) -> np.ndarray:
1100
+ """
1101
+ Pad an image with zeros to the given size.
1102
+ """
1103
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
1104
+ output_height, output_width = output_size
1105
+
1106
+ pad_bottom = output_height - input_height
1107
+ pad_right = output_width - input_width
1108
+ padding = ((0, pad_bottom), (0, pad_right))
1109
+ padded_image = pad(
1110
+ image,
1111
+ padding,
1112
+ mode=PaddingMode.CONSTANT,
1113
+ constant_values=constant_values,
1114
+ data_format=data_format,
1115
+ input_data_format=input_data_format,
1116
+ )
1117
+ if annotation is not None:
1118
+ annotation = self._update_annotation_for_padded_image(
1119
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
1120
+ )
1121
+ return padded_image, annotation
1122
+
1123
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
1124
+ def pad(
1125
+ self,
1126
+ images: List[np.ndarray],
1127
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1128
+ constant_values: Union[float, Iterable[float]] = 0,
1129
+ return_pixel_mask: bool = True,
1130
+ return_tensors: Optional[Union[str, TensorType]] = None,
1131
+ data_format: Optional[ChannelDimension] = None,
1132
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1133
+ update_bboxes: bool = True,
1134
+ ) -> BatchFeature:
1135
+ """
1136
+ Pads a batch of images to the bottom and right of the image with zeros to the size of the largest height and width
1137
+ in the batch and optionally returns their corresponding pixel mask.
1138
+
1139
+ Args:
1140
+ images (List[`np.ndarray`]):
1141
+ Images to pad.
1142
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1143
+ Annotations to transform according to the padding that is applied to the images.
1144
+ constant_values (`float` or `Iterable[float]`, *optional*):
1145
+ The value to use for the padding if `mode` is `"constant"`.
1146
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
1147
+ Whether to return a pixel mask.
1148
+ return_tensors (`str` or `TensorType`, *optional*):
1149
+ The type of tensors to return. Can be one of:
1150
+ - Unset: Return a list of `np.ndarray`.
1151
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
1152
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
1153
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
1154
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
1155
+ data_format (`str` or `ChannelDimension`, *optional*):
1156
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
1157
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1158
+ The channel dimension format of the input image. If not provided, it will be inferred.
1159
+ update_bboxes (`bool`, *optional*, defaults to `True`):
1160
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
1161
+ bounding boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
1162
+ format, the bounding boxes will not be updated.
1163
+ """
1164
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
1165
+
1166
+ annotation_list = annotations if annotations is not None else [None] * len(images)
1167
+ padded_images = []
1168
+ padded_annotations = []
1169
+ for image, annotation in zip(images, annotation_list):
1170
+ padded_image, padded_annotation = self._pad_image(
1171
+ image,
1172
+ pad_size,
1173
+ annotation,
1174
+ constant_values=constant_values,
1175
+ data_format=data_format,
1176
+ input_data_format=input_data_format,
1177
+ update_bboxes=update_bboxes,
1178
+ )
1179
+ padded_images.append(padded_image)
1180
+ padded_annotations.append(padded_annotation)
1181
+
1182
+ data = {"pixel_values": padded_images}
1183
+
1184
+ if return_pixel_mask:
1185
+ masks = [
1186
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
1187
+ for image in images
1188
+ ]
1189
+ data["pixel_mask"] = masks
1190
+
1191
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
1192
+
1193
+ if annotations is not None:
1194
+ encoded_inputs["labels"] = [
1195
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
1196
+ ]
1197
+
1198
+ return encoded_inputs
1199
+
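A hedged sketch of the padding behaviour described above (the `processor` construction is an assumption): two differently sized channels-first images are padded to the per-batch maximum height and width, and the returned pixel mask marks real pixels (1) versus padding (0).

import numpy as np
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()
batch = [
    np.zeros((3, 480, 640), dtype=np.float32),
    np.zeros((3, 512, 512), dtype=np.float32),
]
out = processor.pad(batch, return_tensors="np")
print(out["pixel_values"].shape)  # (2, 3, 512, 640) -- padded to the batch maximum
print(out["pixel_mask"].shape)    # (2, 512, 640)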
1200
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
1201
+ def preprocess(
1202
+ self,
1203
+ images: ImageInput,
1204
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1205
+ return_segmentation_masks: bool = None,
1206
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
1207
+ do_resize: Optional[bool] = None,
1208
+ size: Optional[Dict[str, int]] = None,
1209
+ resample=None, # PILImageResampling
1210
+ do_rescale: Optional[bool] = None,
1211
+ rescale_factor: Optional[Union[int, float]] = None,
1212
+ do_normalize: Optional[bool] = None,
1213
+ do_convert_annotations: Optional[bool] = None,
1214
+ image_mean: Optional[Union[float, List[float]]] = None,
1215
+ image_std: Optional[Union[float, List[float]]] = None,
1216
+ do_pad: Optional[bool] = None,
1217
+ format: Optional[Union[str, AnnotationFormat]] = None,
1218
+ return_tensors: Optional[Union[TensorType, str]] = None,
1219
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
1220
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1221
+ **kwargs,
1222
+ ) -> BatchFeature:
1223
+ """
1224
+ Preprocess an image or a batch of images so that it can be used by the model.
1225
+
1226
+ Args:
1227
+ images (`ImageInput`):
1228
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
1229
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
1230
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1231
+ List of annotations associated with the image or batch of images. If annotation is for object
1232
+ detection, the annotations should be a dictionary with the following keys:
1233
+ - "image_id" (`int`): The image id.
1234
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
1235
+ dictionary. An image can have no annotations, in which case the list should be empty.
1236
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
1237
+ - "image_id" (`int`): The image id.
1238
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
1239
+ An image can have no segments, in which case the list should be empty.
1240
+ - "file_name" (`str`): The file name of the image.
1241
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
1242
+ Whether to return segmentation masks.
1243
+ masks_path (`str` or `pathlib.Path`, *optional*):
1244
+ Path to the directory containing the segmentation masks.
1245
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
1246
+ Whether to resize the image.
1247
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
1248
+ Size of the image after resizing.
1249
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
1250
+ Resampling filter to use when resizing the image.
1251
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
1252
+ Whether to rescale the image.
1253
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
1254
+ Rescale factor to use when rescaling the image.
1255
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
1256
+ Whether to normalize the image.
1257
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
1258
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
1259
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
1260
+ and in relative coordinates.
1261
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
1262
+ Mean to use when normalizing the image.
1263
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
1264
+ Standard deviation to use when normalizing the image.
1265
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
1266
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
1267
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
1268
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
1269
+ Format of the annotations.
1270
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
1271
+ Type of tensors to return. If `None`, will return the list of images.
1272
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
1273
+ The channel dimension format for the output image. Can be one of:
1274
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1275
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1276
+ - Unset: Use the channel dimension format of the input image.
1277
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1278
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
1279
+ from the input image. Can be one of:
1280
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1281
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1282
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
1283
+ """
1284
+ if "pad_and_return_pixel_mask" in kwargs:
1285
+ logger.warning_once(
1286
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
1287
+ "use `do_pad` instead."
1288
+ )
1289
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
1290
+
1291
+ max_size = None
1292
+ if "max_size" in kwargs:
1293
+ logger.warning_once(
1294
+ "The `max_size` argument is deprecated and will be removed in a future version, use"
1295
+ " `size['longest_edge']` instead."
1296
+ )
1297
+ size = kwargs.pop("max_size")
1298
+
1299
+ do_resize = self.do_resize if do_resize is None else do_resize
1300
+ size = self.size if size is None else size
1301
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
1302
+ resample = self.resample if resample is None else resample
1303
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
1304
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
1305
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
1306
+ image_mean = self.image_mean if image_mean is None else image_mean
1307
+ image_std = self.image_std if image_std is None else image_std
1308
+ do_convert_annotations = (
1309
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
1310
+ )
1311
+ do_pad = self.do_pad if do_pad is None else do_pad
1312
+ format = self.format if format is None else format
1313
+
1314
+ images = make_list_of_images(images)
1315
+
1316
+ if not valid_images(images):
1317
+ raise ValueError(
1318
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
1319
+ "torch.Tensor, tf.Tensor or jax.ndarray."
1320
+ )
1321
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
1322
+
1323
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
1324
+ validate_preprocess_arguments(
1325
+ do_rescale=do_rescale,
1326
+ rescale_factor=rescale_factor,
1327
+ do_normalize=do_normalize,
1328
+ image_mean=image_mean,
1329
+ image_std=image_std,
1330
+ do_resize=do_resize,
1331
+ size=size,
1332
+ resample=resample,
1333
+ )
1334
+
1335
+ if annotations is not None and isinstance(annotations, dict):
1336
+ annotations = [annotations]
1337
+
1338
+ if annotations is not None and len(images) != len(annotations):
1339
+ raise ValueError(
1340
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
1341
+ )
1342
+
1343
+ format = AnnotationFormat(format)
1344
+ if annotations is not None:
1345
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
1346
+
1347
+ if (
1348
+ masks_path is not None
1349
+ and format == AnnotationFormat.COCO_PANOPTIC
1350
+ and not isinstance(masks_path, (pathlib.Path, str))
1351
+ ):
1352
+ raise ValueError(
1353
+ "The path to the directory containing the mask PNG files should be provided as a"
1354
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
1355
+ )
1356
+
1357
+ # All transformations expect numpy arrays
1358
+ images = [to_numpy_array(image) for image in images]
1359
+
1360
+ if is_scaled_image(images[0]) and do_rescale:
1361
+ logger.warning_once(
1362
+ "It looks like you are trying to rescale already rescaled images. If the input"
1363
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1364
+ )
1365
+
1366
+ if input_data_format is None:
1367
+ # We assume that all images have the same channel dimension format.
1368
+ input_data_format = infer_channel_dimension_format(images[0])
1369
+
1370
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1371
+ if annotations is not None:
1372
+ prepared_images = []
1373
+ prepared_annotations = []
1374
+ for image, target in zip(images, annotations):
1375
+ target = self.prepare_annotation(
1376
+ image,
1377
+ target,
1378
+ format,
1379
+ return_segmentation_masks=return_segmentation_masks,
1380
+ masks_path=masks_path,
1381
+ input_data_format=input_data_format,
1382
+ )
1383
+ prepared_images.append(image)
1384
+ prepared_annotations.append(target)
1385
+ images = prepared_images
1386
+ annotations = prepared_annotations
1387
+ del prepared_images, prepared_annotations
1388
+
1389
+ # transformations
1390
+ if do_resize:
1391
+ if annotations is not None:
1392
+ resized_images, resized_annotations = [], []
1393
+ for image, target in zip(images, annotations):
1394
+ orig_size = get_image_size(image, input_data_format)
1395
+ resized_image = self.resize(
1396
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
1397
+ )
1398
+ resized_annotation = self.resize_annotation(
1399
+ target, orig_size, get_image_size(resized_image, input_data_format)
1400
+ )
1401
+ resized_images.append(resized_image)
1402
+ resized_annotations.append(resized_annotation)
1403
+ images = resized_images
1404
+ annotations = resized_annotations
1405
+ del resized_images, resized_annotations
1406
+ else:
1407
+ images = [
1408
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1409
+ for image in images
1410
+ ]
1411
+
1412
+ if do_rescale:
1413
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1414
+
1415
+ if do_normalize:
1416
+ images = [
1417
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1418
+ ]
1419
+
1420
+ if do_convert_annotations and annotations is not None:
1421
+ annotations = [
1422
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1423
+ for annotation, image in zip(annotations, images)
1424
+ ]
1425
+
1426
+ if do_pad:
1427
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1428
+ encoded_inputs = self.pad(
1429
+ images,
1430
+ annotations=annotations,
1431
+ return_pixel_mask=True,
1432
+ data_format=data_format,
1433
+ input_data_format=input_data_format,
1434
+ update_bboxes=do_convert_annotations,
1435
+ return_tensors=return_tensors,
1436
+ )
1437
+ else:
1438
+ images = [
1439
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1440
+ for image in images
1441
+ ]
1442
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1443
+ if annotations is not None:
1444
+ encoded_inputs["labels"] = [
1445
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1446
+ ]
1447
+
1448
+ return encoded_inputs
1449
+
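An end-to-end hedged sketch of `preprocess` with a COCO-detection style annotation; the checkpoint name is an assumption and the random image is only a stand-in.

import numpy as np
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")  # assumed checkpoint
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
annotation = {
    "image_id": 0,
    "annotations": [
        {"bbox": [10, 20, 100, 50], "category_id": 1, "area": 5000.0, "iscrowd": 0}
    ],
}
encoding = processor.preprocess(image, annotations=annotation, return_tensors="pt")
# encoding["pixel_values"], encoding["pixel_mask"], encoding["labels"]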
1450
+ # POSTPROCESSING METHODS - TODO: add support for other frameworks
1451
+ def post_process(self, outputs, target_sizes):
1452
+ """
1453
+ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
1454
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1455
+
1456
+ Args:
1457
+ outputs ([`DeformableDetrObjectDetectionOutput`]):
1458
+ Raw outputs of the model.
1459
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
1460
+ Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
1461
+ original image size (before any data augmentation). For visualization, this should be the image size
1462
+ after data augmentation, but before padding.
1463
+ Returns:
1464
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1465
+ in the batch as predicted by the model.
1466
+ """
1467
+ logger.warning_once(
1468
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
1469
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
1470
+ )
1471
+
1472
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1473
+
1474
+ if len(out_logits) != len(target_sizes):
1475
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
1476
+ if target_sizes.shape[1] != 2:
1477
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
1478
+
1479
+ prob = out_logits.sigmoid()
1480
+ topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
1481
+ scores = topk_values
1482
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
1483
+ labels = topk_indexes % out_logits.shape[2]
1484
+ boxes = center_to_corners_format(out_bbox)
1485
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
1486
+
1487
+ # and from relative [0, 1] to absolute [0, height] coordinates
1488
+ img_h, img_w = target_sizes.unbind(1)
1489
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
1490
+ boxes = boxes * scale_fct[:, None, :]
1491
+
1492
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
1493
+
1494
+ return results
1495
+
1496
+ def post_process_object_detection(
1497
+ self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
1498
+ ):
1499
+ """
1500
+ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
1501
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1502
+
1503
+ Args:
1504
+ outputs ([`DeformableDetrObjectDetectionOutput`]):
1505
+ Raw outputs of the model.
1506
+ threshold (`float`, *optional*):
1507
+ Score threshold to keep object detection predictions.
1508
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1509
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1510
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
1511
+ top_k (`int`, *optional*, defaults to 100):
1512
+ Keep only top k bounding boxes before filtering by thresholding.
1513
+
1514
+ Returns:
1515
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1516
+ in the batch as predicted by the model.
1517
+ """
1518
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1519
+
1520
+ if target_sizes is not None:
1521
+ if len(out_logits) != len(target_sizes):
1522
+ raise ValueError(
1523
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1524
+ )
1525
+
1526
+ prob = out_logits.sigmoid()
1527
+ prob = prob.view(out_logits.shape[0], -1)
1528
+ k_value = min(top_k, prob.size(1))
1529
+ topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
1530
+ scores = topk_values
1531
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
1532
+ labels = topk_indexes % out_logits.shape[2]
1533
+ boxes = center_to_corners_format(out_bbox)
1534
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
1535
+
1536
+ # and from relative [0, 1] to absolute [0, height] coordinates
1537
+ if target_sizes is not None:
1538
+ if isinstance(target_sizes, List):
1539
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1540
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1541
+ else:
1542
+ img_h, img_w = target_sizes.unbind(1)
1543
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1544
+ boxes = boxes * scale_fct[:, None, :]
1545
+
1546
+ results = []
1547
+ for s, l, b in zip(scores, labels, boxes):
1548
+ score = s[s > threshold]
1549
+ label = l[s > threshold]
1550
+ box = b[s > threshold]
1551
+ results.append({"scores": score, "labels": label, "boxes": box})
1552
+
1553
+ return results
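A hedged sketch of the typical post-processing flow (checkpoint, threshold and the dummy image are assumptions): run the detector, then map the sigmoid logits and normalized boxes back to absolute (top_left_x, top_left_y, bottom_right_x, bottom_right_y) boxes per image.

import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, DeformableDetrForObjectDetection

processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
# results["scores"], results["labels"], results["boxes"]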
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py ADDED
@@ -0,0 +1,49 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Loading of Deformable DETR's CUDA kernels"""
16
+ import os
17
+ from pathlib import Path
18
+
19
+
20
+ def load_cuda_kernels():
21
+ from torch.utils.cpp_extension import load
22
+
23
+ root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
24
+ src_files = [
25
+ root / filename
26
+ for filename in [
27
+ "vision.cpp",
28
+ os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
29
+ os.path.join("cuda", "ms_deform_attn_cuda.cu"),
30
+ ]
31
+ ]
32
+
33
+ load(
34
+ "MultiScaleDeformableAttention",
35
+ src_files,
36
+ with_cuda=True,
37
+ extra_include_paths=[str(root)],
38
+ extra_cflags=["-DWITH_CUDA=1"],
39
+ extra_cuda_cflags=[
40
+ "-DCUDA_HAS_FP16=1",
41
+ "-D__CUDA_NO_HALF_OPERATORS__",
42
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
43
+ "-D__CUDA_NO_HALF2_OPERATORS__",
44
+ ],
45
+ )
46
+
47
+ import MultiScaleDeformableAttention as MSDA
48
+
49
+ return MSDA
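A brief hedged usage note: the helper above JIT-compiles the kernels on first call through torch.utils.cpp_extension.load, so a CUDA toolchain must be available; later calls reuse the cached build.

# Hypothetical call site; requires a working CUDA compiler and a GPU-enabled PyTorch.
MSDA = load_cuda_kernels()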
llmeval-env/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__init__.py ADDED
@@ -0,0 +1,58 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_jamba": ["JambaConfig"],
21
+ }
22
+
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_jamba"] = [
31
+ "JambaForCausalLM",
32
+ "JambaForSequenceClassification",
33
+ "JambaModel",
34
+ "JambaPreTrainedModel",
35
+ ]
36
+
37
+
38
+ if TYPE_CHECKING:
39
+ from .configuration_jamba import JambaConfig
40
+
41
+ try:
42
+ if not is_torch_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ from .modeling_jamba import (
48
+ JambaForCausalLM,
49
+ JambaForSequenceClassification,
50
+ JambaModel,
51
+ JambaPreTrainedModel,
52
+ )
53
+
54
+
55
+ else:
56
+ import sys
57
+
58
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
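A hedged illustration of what the lazy-module pattern above provides (the import paths are the ones defined in this file): the configuration class imports without torch, while the modeling classes are only resolved, and torch only required, on first access.

from transformers.models.jamba import JambaConfig        # lightweight, always available
from transformers.models.jamba import JambaForCausalLM   # resolved lazily; needs torch installed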
llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (848 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc ADDED
Binary file (9.98 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc ADDED
Binary file (51.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py ADDED
@@ -0,0 +1,223 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Jamba model configuration"""
16
+ import math
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class JambaConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`JambaModel`]. It is used to instantiate a
28
+ Jamba model according to the specified arguments, defining the model architecture. Instantiating a configuration
29
+ with the defaults will yield a similar configuration to that of the Jamba-v0.1 model.
30
+
31
+ [ai21labs/Jamba-v0.1](https://huggingface.co/ai21labs/Jamba-v0.1)
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 65536):
39
+ Vocabulary size of the Jamba model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`JambaModel`]
41
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
42
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
43
+ model has a output word embedding layer.
44
+ hidden_size (`int`, *optional*, defaults to 4096):
45
+ Dimension of the hidden representations.
46
+ intermediate_size (`int`, *optional*, defaults to 14336):
47
+ Dimension of the MLP representations.
48
+ num_hidden_layers (`int`, *optional*, defaults to 32):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 32):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ num_key_value_heads (`int`, *optional*, defaults to 8):
53
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
54
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
55
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
56
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
57
+ by meanpooling all the original heads within that group. For more details checkout [this
58
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
60
+ The non-linear activation function (function or string) in the decoder.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
69
+ Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
70
+ integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
71
+ logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
72
+ sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
73
+ significantly.
74
+ output_router_logits (`bool`, *optional*, defaults to `False`):
75
+ Whether or not the router logits should be returned by the model. Enabling this will also
76
+ allow the model to output the auxiliary loss. See [here]() for more details
77
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
78
+ The aux loss factor for the total loss.
79
+ pad_token_id (`int`, *optional*, defaults to 0):
80
+ The id of the padding token.
81
+ bos_token_id (`int`, *optional*, defaults to 1):
82
+ The id of the "beginning-of-sequence" token.
83
+ eos_token_id (`int`, *optional*, defaults to 2):
84
+ The id of the "end-of-sequence" token.
85
+ sliding_window (`int`, *optional*):
86
+ Sliding window attention window size. If not specified, will default to `None`.
87
+ max_position_embeddings (`int`, *optional*, defaults to 262144):
88
+ This value doesn't have any real effect. The maximum sequence length that this model is intended to be
89
+ used with. It can be used with longer sequences, but performance may degrade.
90
+ attention_dropout (`float`, *optional*, defaults to 0.0):
91
+ The dropout ratio for the attention probabilities.
92
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
93
+ The number of experts to root per-token, can be also interpreted as the `top-p` routing
94
+ parameter
95
+ num_experts (`int`, *optional*, defaults to 16):
96
+ Number of experts per Sparse MLP layer.
97
+ expert_layer_period (`int`, *optional*, defaults to 2):
98
+ Once in this many layers, we will have an expert layer
99
+ expert_layer_offset (`int`, *optional*, defaults to 1):
100
+ The first layer index that contains an expert mlp layer
101
+ attn_layer_period (`int`, *optional*, defaults to 8):
102
+ Once in this many layers, we will have a vanilla attention layer
103
+ attn_layer_offset (`int`, *optional*, defaults to 4):
104
+ The first layer index that contains a vanilla attention mlp layer
105
+ use_mamba_kernels (`bool`, *optional*, defaults to `True`):
106
+ Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
107
+ `causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if
108
+ `True` and kernels are not available
109
+ mamba_d_state (`int`, *optional*, defaults to 16):
110
+ The dimension the mamba state space latents
111
+ mamba_d_conv (`int`, *optional*, defaults to 4):
112
+ The size of the mamba convolution kernel
113
+ mamba_expand (`int`, *optional*, defaults to 2):
114
+ Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
115
+ mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
116
+ Rank of the the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
117
+ mamba_conv_bias (`bool`, *optional*, defaults to `True`):
118
+ Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
119
+ mamba_proj_bias (`bool`, *optional*, defaults to `False`):
120
+ Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
121
+
122
+ """
123
+
124
+ model_type = "jamba"
125
+ keys_to_ignore_at_inference = ["past_key_values"]
126
+
127
+ def __init__(
128
+ self,
129
+ vocab_size=65536,
130
+ tie_word_embeddings=False,
131
+ hidden_size=4096,
132
+ intermediate_size=14336,
133
+ num_hidden_layers=32,
134
+ num_attention_heads=32,
135
+ num_key_value_heads=8,
136
+ hidden_act="silu",
137
+ initializer_range=0.02,
138
+ rms_norm_eps=1e-6,
139
+ use_cache=True,
140
+ num_logits_to_keep=1,
141
+ output_router_logits=False,
142
+ router_aux_loss_coef=0.001,
143
+ pad_token_id=0,
144
+ bos_token_id=1,
145
+ eos_token_id=2,
146
+ sliding_window=None,
147
+ max_position_embeddings=262144,
148
+ attention_dropout=0.0,
149
+ num_experts_per_tok=2,
150
+ num_experts=16,
151
+ expert_layer_period=2,
152
+ expert_layer_offset=1,
153
+ attn_layer_period=8,
154
+ attn_layer_offset=4,
155
+ use_mamba_kernels=True,
156
+ mamba_d_state=16,
157
+ mamba_d_conv=4,
158
+ mamba_expand=2,
159
+ mamba_dt_rank="auto",
160
+ mamba_conv_bias=True,
161
+ mamba_proj_bias=False,
162
+ **kwargs,
163
+ ):
164
+ self.vocab_size = vocab_size
165
+ self.tie_word_embeddings = tie_word_embeddings
166
+ self.hidden_size = hidden_size
167
+ self.intermediate_size = intermediate_size
168
+ self.num_hidden_layers = num_hidden_layers
169
+ self.num_attention_heads = num_attention_heads
170
+ self.sliding_window = sliding_window
171
+ self.max_position_embeddings = max_position_embeddings
172
+ self.attention_dropout = attention_dropout
173
+
174
+ # for backward compatibility
175
+ if num_key_value_heads is None:
176
+ num_key_value_heads = num_attention_heads
177
+
178
+ self.num_key_value_heads = num_key_value_heads
179
+ self.hidden_act = hidden_act
180
+ self.initializer_range = initializer_range
181
+ self.rms_norm_eps = rms_norm_eps
182
+
183
+ self.use_cache = use_cache
184
+ self.num_logits_to_keep = num_logits_to_keep
185
+ self.output_router_logits = output_router_logits
186
+ self.router_aux_loss_coef = router_aux_loss_coef
187
+
188
+ self.num_experts_per_tok = num_experts_per_tok
189
+ self.num_experts = num_experts
190
+ self.expert_layer_period = expert_layer_period
191
+ self.expert_layer_offset = expert_layer_offset
192
+ self.attn_layer_period = attn_layer_period
193
+ self.attn_layer_offset = attn_layer_offset
194
+
195
+ self.use_mamba_kernels = use_mamba_kernels
196
+ self.mamba_d_state = mamba_d_state
197
+ self.mamba_d_conv = mamba_d_conv
198
+ self.mamba_expand = mamba_expand
199
+ self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == "auto" else mamba_dt_rank
200
+ self.mamba_conv_bias = mamba_conv_bias
201
+ self.mamba_proj_bias = mamba_proj_bias
202
+
203
+ super().__init__(
204
+ pad_token_id=pad_token_id,
205
+ bos_token_id=bos_token_id,
206
+ eos_token_id=eos_token_id,
207
+ tie_word_embeddings=tie_word_embeddings,
208
+ **kwargs,
209
+ )
210
+
211
+ @property
212
+ def layers_block_type(self):
213
+ return [
214
+ "attention" if i % self.attn_layer_period == self.attn_layer_offset else "mamba"
215
+ for i in range(self.num_hidden_layers)
216
+ ]
217
+
218
+ @property
219
+ def layers_num_experts(self):
220
+ return [
221
+ self.num_experts if i % self.expert_layer_period == self.expert_layer_offset else 1
222
+ for i in range(self.num_hidden_layers)
223
+ ]
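A hedged sketch of how the period/offset arguments translate into the per-layer layout exposed by the two properties above, using the defaults (attn_layer_period=8, attn_layer_offset=4, expert_layer_period=2, expert_layer_offset=1) on a small 8-layer config; it assumes a transformers build that ships Jamba.

from transformers import JambaConfig

config = JambaConfig(num_hidden_layers=8)
print(config.layers_block_type)
# ['mamba', 'mamba', 'mamba', 'mamba', 'attention', 'mamba', 'mamba', 'mamba']
print(config.layers_num_experts)
# [1, 16, 1, 16, 1, 16, 1, 16]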
llmeval-env/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py ADDED
@@ -0,0 +1,1882 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Jamba model."""
21
+ import inspect
22
+ import math
23
+ from typing import Any, Dict, List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.nn.functional as F
27
+ import torch.utils.checkpoint
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import ACT2FN
32
+ from ...cache_utils import DynamicCache # we need __iter__ and __len__ of pkv
33
+ from ...modeling_attn_mask_utils import (
34
+ AttentionMaskConverter,
35
+ )
36
+ from ...modeling_outputs import (
37
+ MoeCausalLMOutputWithPast,
38
+ MoeModelOutputWithPast,
39
+ SequenceClassifierOutputWithPast,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...utils import (
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ is_flash_attn_greater_or_equal_2_10,
46
+ logging,
47
+ replace_return_docstrings,
48
+ )
49
+ from ...utils.import_utils import (
50
+ is_causal_conv1d_available,
51
+ is_flash_attn_2_available,
52
+ is_mamba_ssm_available,
53
+ )
54
+ from .configuration_jamba import JambaConfig
55
+
56
+
57
+ if is_flash_attn_2_available():
58
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
59
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
60
+
61
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
62
+
63
+
64
+ if is_mamba_ssm_available():
65
+ from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn
66
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
67
+ else:
68
+ selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None
69
+
70
+ if is_causal_conv1d_available():
71
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
72
+ else:
73
+ causal_conv1d_update, causal_conv1d_fn = None, None
74
+
75
+ is_fast_path_available = all(
76
+ (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
77
+ )
78
+
79
+
80
+ logger = logging.get_logger(__name__)
81
+
82
+ _CONFIG_FOR_DOC = "JambaConfig"
83
+
84
+
85
+ # Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func with gate->router
86
+ def load_balancing_loss_func(
87
+ router_logits: torch.Tensor,
88
+ num_experts: torch.Tensor = None,
89
+ top_k=2,
90
+ attention_mask: Optional[torch.Tensor] = None,
91
+ ) -> float:
92
+ r"""
93
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
94
+
95
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
96
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
97
+ experts is too unbalanced.
98
+
99
+ Args:
100
+ router_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
101
+ Logits from the `router`, should be a tuple of model.config.num_hidden_layers tensors of
102
+ shape [batch_size X sequence_length, num_experts].
103
+ attention_mask (`torch.Tensor`, None):
104
+ The attention_mask used in the forward function, of
105
+ shape [batch_size X sequence_length] if not None.
106
+ num_experts (`int`, *optional*):
107
+ Number of experts
108
+
109
+ Returns:
110
+ The auxiliary loss.
111
+ """
112
+ if router_logits is None or not isinstance(router_logits, tuple):
113
+ return 0
114
+
115
+ if isinstance(router_logits, tuple):
116
+ compute_device = router_logits[0].device
117
+ concatenated_router_logits = torch.cat(
118
+ [layer_router.to(compute_device) for layer_router in router_logits], dim=0
119
+ )
120
+
121
+ routing_weights = torch.nn.functional.softmax(concatenated_router_logits, dim=-1)
122
+
123
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
124
+
125
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
126
+
127
+ if attention_mask is None:
128
+ # Compute the percentage of tokens routed to each expert
129
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
130
+
131
+ # Compute the average probability of routing to these experts
132
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
133
+ else:
134
+ batch_size, sequence_length = attention_mask.shape
135
+ num_hidden_layers = concatenated_router_logits.shape[0] // (batch_size * sequence_length)
136
+
137
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
138
+ expert_attention_mask = (
139
+ attention_mask[None, :, :, None, None]
140
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
141
+ .reshape(-1, top_k, num_experts)
142
+ .to(compute_device)
143
+ )
144
+
145
+ # Compute the percentage of tokens routed to each expert
146
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
147
+ expert_attention_mask, dim=0
148
+ )
149
+
150
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
151
+ router_per_expert_attention_mask = (
152
+ attention_mask[None, :, :, None]
153
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
154
+ .reshape(-1, num_experts)
155
+ .to(compute_device)
156
+ )
157
+
158
+ # Compute the average probability of routing to these experts
159
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
160
+ router_per_expert_attention_mask, dim=0
161
+ )
162
+
163
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
164
+ return overall_loss * num_experts
165
+
166
+
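A hedged, self-contained sketch of calling the load-balancing loss above with dummy router logits, following the shapes in the docstring (a tuple of per-layer tensors of shape [batch_size * sequence_length, num_experts]).

import torch
from transformers.models.jamba.modeling_jamba import load_balancing_loss_func

num_layers, batch_size, seq_len, num_experts, top_k = 4, 2, 5, 16, 2
router_logits = tuple(
    torch.randn(batch_size * seq_len, num_experts) for _ in range(num_layers)
)
aux_loss = load_balancing_loss_func(router_logits, num_experts=num_experts, top_k=top_k)
print(aux_loss)  # scalar tensor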
167
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
168
+ def _get_unpad_data(attention_mask):
169
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
170
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
171
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
172
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
173
+ return (
174
+ indices,
175
+ cu_seqlens,
176
+ max_seqlen_in_batch,
177
+ )
178
+
179
+
180
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Jamba
181
+ class JambaRMSNorm(nn.Module):
182
+ def __init__(self, hidden_size, eps=1e-6):
183
+ """
184
+ JambaRMSNorm is equivalent to T5LayerNorm
185
+ """
186
+ super().__init__()
187
+ self.weight = nn.Parameter(torch.ones(hidden_size))
188
+ self.variance_epsilon = eps
189
+
190
+ def forward(self, hidden_states):
191
+ input_dtype = hidden_states.dtype
192
+ hidden_states = hidden_states.to(torch.float32)
193
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
194
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
195
+ return self.weight * hidden_states.to(input_dtype)
196
+
197
+
198
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
199
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
200
+ """
201
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
202
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
203
+ """
204
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
205
+ if n_rep == 1:
206
+ return hidden_states
207
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
208
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
209
+
210
+
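A hedged mini-example of repeat_kv above, equivalent (per its docstring) to torch.repeat_interleave along the head dimension: expanding 8 key/value heads to match 32 query heads (n_rep = 4).

import torch
from transformers.models.jamba.modeling_jamba import repeat_kv

kv = torch.randn(1, 8, 128, 64)    # (batch, num_key_value_heads, seq_len, head_dim)
expanded = repeat_kv(kv, n_rep=4)
print(expanded.shape)              # torch.Size([1, 32, 128, 64])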
211
+ class HybridMambaAttentionDynamicCache(DynamicCache):
212
+ """
213
+ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
214
+ (which has a constant shape regardless of seq_len).
215
+
216
+ This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
217
+ and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape of each tensor depends on the layer type:
218
+ For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
219
+ while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
220
+ For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
221
+ while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
222
+ and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
223
+ """
224
+
225
+ def __init__(self, config, batch_size, dtype=torch.float16, device=None):
226
+ self.dtype = dtype
227
+ self.layers_block_type = config.layers_block_type
228
+ self.has_previous_state = False # only used by mamba
229
+ intermediate_size = config.mamba_expand * config.hidden_size
230
+ ssm_state_size = config.mamba_d_state
231
+ conv_kernel_size = config.mamba_d_conv
232
+ self.conv_states = []
233
+ self.ssm_states = []
234
+ for i in range(config.num_hidden_layers):
235
+ if self.layers_block_type[i] == "mamba":
236
+ self.conv_states += [
237
+ torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
238
+ ]
239
+ self.ssm_states += [
240
+ torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype)
241
+ ]
242
+ else:
243
+ self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
244
+ self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
245
+
246
+ self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
247
+ self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
248
+
249
+ def update(
250
+ self,
251
+ key_states: torch.Tensor,
252
+ value_states: torch.Tensor,
253
+ layer_idx: int,
254
+ cache_kwargs: Optional[Dict[str, Any]] = None,
255
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
256
+ # Update the cache
257
+ if self.key_cache[layer_idx].shape[-1] == 0:
258
+ self.key_cache[layer_idx] = key_states
259
+ self.value_cache[layer_idx] = value_states
260
+ else:
261
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
262
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
263
+
264
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
265
+
266
+ def reorder_cache(self, beam_idx: torch.LongTensor):
267
+ """Reorders the cache for beam search, given the selected beam indices."""
268
+ for layer_idx in range(len(self.key_cache)):
269
+ device = self.key_cache[layer_idx].device
270
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
271
+ device = self.value_cache[layer_idx].device
272
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
273
+
274
+ device = self.conv_states[layer_idx].device
275
+ self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
276
+ device = self.ssm_states[layer_idx].device
277
+ self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
278
+
279
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
280
+ raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")
281
+
282
+ @classmethod
283
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
284
+ raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")
285
+
286
+
287
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Jamba
288
+ class JambaAttention(nn.Module):
289
+ """
290
+ Multi-headed attention from the 'Attention Is All You Need' paper, modified to use sliding window attention as in
291
+ Longformer and "Generating Long Sequences with Sparse Transformers".
292
+ """
293
+
294
+ def __init__(self, config: JambaConfig, layer_idx: Optional[int] = None):
295
+ super().__init__()
296
+ self.config = config
297
+ self.layer_idx = layer_idx
298
+ if layer_idx is None:
299
+ logger.warning_once(
300
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
301
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
302
+ "when creating this class."
303
+ )
304
+
305
+ self.hidden_size = config.hidden_size
306
+ self.num_heads = config.num_attention_heads
307
+ self.head_dim = self.hidden_size // self.num_heads
308
+ self.num_key_value_heads = config.num_key_value_heads
309
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
310
+ self.is_causal = True
311
+ self.attention_dropout = config.attention_dropout
312
+
313
+ if (self.head_dim * self.num_heads) != self.hidden_size:
314
+ raise ValueError(
315
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
316
+ f" and `num_heads`: {self.num_heads})."
317
+ )
318
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
319
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
320
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
321
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
322
+
323
+ def forward(
324
+ self,
325
+ hidden_states: torch.Tensor,
326
+ attention_mask: Optional[torch.Tensor] = None,
327
+ position_ids: Optional[torch.LongTensor] = None,
328
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
329
+ output_attentions: bool = False,
330
+ use_cache: bool = False,
331
+ cache_position: Optional[torch.LongTensor] = None,
332
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
333
+ bsz, q_len, _ = hidden_states.size()
334
+
335
+ query_states = self.q_proj(hidden_states)
336
+ key_states = self.k_proj(hidden_states)
337
+ value_states = self.v_proj(hidden_states)
338
+
339
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
340
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
341
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
342
+
343
+ if past_key_value is not None:
344
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
345
+
346
+ # repeat k/v heads if n_kv_heads < n_heads
347
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
348
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
349
+
350
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
351
+
352
+ if attention_mask is not None: # no matter the length, we just slice it
353
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
354
+ attn_weights = attn_weights + causal_mask
355
+
356
+ # upcast attention to fp32
357
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
358
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
359
+ attn_output = torch.matmul(attn_weights, value_states)
360
+
361
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
362
+ raise ValueError(
363
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
364
+ f" {attn_output.size()}"
365
+ )
366
+
367
+ attn_output = attn_output.transpose(1, 2).contiguous()
368
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
369
+
370
+ attn_output = self.o_proj(attn_output)
371
+
372
+ if not output_attentions:
373
+ attn_weights = None
374
+
375
+ return attn_output, attn_weights, past_key_value
376
+
377
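The eager path above is plain scaled dot-product attention with grouped-query key/value heads. A minimal standalone sketch of the same math, with a local stand-in for the module-level `repeat_kv` helper and made-up tensor sizes:

import math
import torch

def repeat_kv_local(x, n_rep):
    # stand-in for the module's repeat_kv: duplicate each kv head n_rep times
    bsz, n_kv, slen, hdim = x.shape
    return x[:, :, None, :, :].expand(bsz, n_kv, n_rep, slen, hdim).reshape(bsz, n_kv * n_rep, slen, hdim)

bsz, q_len, num_heads, num_kv_heads, head_dim = 1, 5, 4, 2, 8
q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn(bsz, num_kv_heads, q_len, head_dim)
v = torch.randn(bsz, num_kv_heads, q_len, head_dim)

k = repeat_kv_local(k, num_heads // num_kv_heads)
v = repeat_kv_local(v, num_heads // num_kv_heads)

scores = q @ k.transpose(2, 3) / math.sqrt(head_dim)
causal = torch.triu(torch.full((q_len, q_len), float("-inf")), diagonal=1)
scores = scores + causal                                    # additive causal mask
attn = torch.softmax(scores, dim=-1, dtype=torch.float32).to(q.dtype)
out = attn @ v                                              # (bsz, num_heads, q_len, head_dim)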
+
378
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Jamba
379
+ class JambaFlashAttention2(JambaAttention):
380
+ """
381
+ Jamba flash attention module. This module inherits from `JambaAttention` as the weights of the module stay
382
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
383
+ flash attention and deal with padding tokens in case the input contains any of them.
384
+ """
385
+
386
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
387
+ def __init__(self, *args, **kwargs):
388
+ super().__init__(*args, **kwargs)
389
+
390
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
391
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
392
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
393
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
394
+
395
+ def forward(
396
+ self,
397
+ hidden_states: torch.Tensor,
398
+ attention_mask: Optional[torch.Tensor] = None,
399
+ position_ids: Optional[torch.LongTensor] = None,
400
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
401
+ output_attentions: bool = False,
402
+ use_cache: bool = False,
403
+ cache_position: Optional[torch.LongTensor] = None,
404
+ **kwargs,
405
+ ):
406
+ bsz, q_len, _ = hidden_states.size()
407
+
408
+ query_states = self.q_proj(hidden_states)
409
+ key_states = self.k_proj(hidden_states)
410
+ value_states = self.v_proj(hidden_states)
411
+
412
+ # Flash attention requires the input to have the shape
413
+ # batch_size x seq_length x num_heads x head_dim
414
+ # therefore we just need to keep the original shape
415
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
416
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
417
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
418
+
419
+ kv_seq_len = cache_position[-1]
420
+
421
+ use_sliding_windows = (
422
+ _flash_supports_window_size
423
+ and getattr(self.config, "sliding_window", None) is not None
424
+ and kv_seq_len > self.config.sliding_window
425
+ )
426
+
427
+ if not _flash_supports_window_size:
428
+ logger.warning_once(
429
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
430
+ " make sure to upgrade flash-attn library."
431
+ )
432
+
433
+ if past_key_value is not None:
434
+ # Activate cache slicing only if the config has a `sliding_window` attribute
435
+ cache_has_contents = cache_position[0] > 0
436
+ if (
437
+ getattr(self.config, "sliding_window", None) is not None
438
+ and kv_seq_len > self.config.sliding_window
439
+ and cache_has_contents
440
+ ):
441
+ slicing_tokens = 1 - self.config.sliding_window
442
+
443
+ past_key = past_key_value[self.layer_idx][0]
444
+ past_value = past_key_value[self.layer_idx][1]
445
+
446
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
447
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
448
+
449
+ if past_key.shape[-2] != self.config.sliding_window - 1:
450
+ raise ValueError(
451
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
452
+ f" {past_key.shape}"
453
+ )
454
+
455
+ if attention_mask is not None:
456
+ attention_mask = attention_mask[:, slicing_tokens:]
457
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
458
+
459
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
460
+
461
+ # repeat k/v heads if n_kv_heads < n_heads
462
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
463
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
464
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
465
+
466
+ # In PEFT, the layer norms are usually cast to float32 for training stability,
466
+ # so the input hidden states may get silently cast to float32. Hence, we need to
467
+ # cast them back to the expected dtype to make sure everything works as expected.
469
+ input_dtype = query_states.dtype
470
+ if input_dtype == torch.float32:
471
+ if torch.is_autocast_enabled():
472
+ target_dtype = torch.get_autocast_gpu_dtype()
473
+ # Handle the case where the model is quantized
474
+ elif hasattr(self.config, "_pre_quantization_dtype"):
475
+ target_dtype = self.config._pre_quantization_dtype
476
+ else:
477
+ target_dtype = self.q_proj.weight.dtype
478
+
479
+ logger.warning_once(
480
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
481
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
482
+ f" {target_dtype}."
483
+ )
484
+
485
+ query_states = query_states.to(target_dtype)
486
+ key_states = key_states.to(target_dtype)
487
+ value_states = value_states.to(target_dtype)
488
+
489
+ # Reshape to the expected shape for Flash Attention
490
+ query_states = query_states.transpose(1, 2)
491
+ key_states = key_states.transpose(1, 2)
492
+ value_states = value_states.transpose(1, 2)
493
+
494
+ attn_output = self._flash_attention_forward(
495
+ query_states,
496
+ key_states,
497
+ value_states,
498
+ attention_mask,
499
+ q_len,
500
+ dropout=dropout_rate,
501
+ use_sliding_windows=use_sliding_windows,
502
+ )
503
+
504
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
505
+ attn_output = self.o_proj(attn_output)
506
+
507
+ if not output_attentions:
508
+ attn_weights = None
509
+
510
+ return attn_output, attn_weights, past_key_value
511
+
512
+ def _flash_attention_forward(
513
+ self,
514
+ query_states,
515
+ key_states,
516
+ value_states,
517
+ attention_mask,
518
+ query_length,
519
+ dropout=0.0,
520
+ softmax_scale=None,
521
+ use_sliding_windows=False,
522
+ ):
523
+ """
524
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
525
+ the input is first unpadded, then the attention scores are computed and padded back to the original shape.
526
+
527
+ Args:
528
+ query_states (`torch.Tensor`):
529
+ Input query states to be passed to Flash Attention API
530
+ key_states (`torch.Tensor`):
531
+ Input key states to be passed to Flash Attention API
532
+ value_states (`torch.Tensor`):
533
+ Input value states to be passed to Flash Attention API
534
+ attention_mask (`torch.Tensor`):
535
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
536
+ position of padding tokens and 1 for the position of non-padding tokens.
537
+ dropout (`float`, *optional*):
538
+ Attention dropout
539
+ softmax_scale (`float`, *optional*):
540
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
541
+ use_sliding_windows (`bool`, *optional*):
542
+ Whether to activate sliding window attention.
543
+ """
544
+ if not self._flash_attn_uses_top_left_mask:
545
+ causal = self.is_causal
546
+ else:
547
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
548
+ causal = self.is_causal and query_length != 1
549
+
550
+ # Contains at least one padding token in the sequence
551
+ if attention_mask is not None:
552
+ batch_size = query_states.shape[0]
553
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
554
+ query_states, key_states, value_states, attention_mask, query_length
555
+ )
556
+
557
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
558
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
559
+
560
+ if not use_sliding_windows:
561
+ attn_output_unpad = flash_attn_varlen_func(
562
+ query_states,
563
+ key_states,
564
+ value_states,
565
+ cu_seqlens_q=cu_seqlens_q,
566
+ cu_seqlens_k=cu_seqlens_k,
567
+ max_seqlen_q=max_seqlen_in_batch_q,
568
+ max_seqlen_k=max_seqlen_in_batch_k,
569
+ dropout_p=dropout,
570
+ softmax_scale=softmax_scale,
571
+ causal=causal,
572
+ )
573
+ else:
574
+ attn_output_unpad = flash_attn_varlen_func(
575
+ query_states,
576
+ key_states,
577
+ value_states,
578
+ cu_seqlens_q=cu_seqlens_q,
579
+ cu_seqlens_k=cu_seqlens_k,
580
+ max_seqlen_q=max_seqlen_in_batch_q,
581
+ max_seqlen_k=max_seqlen_in_batch_k,
582
+ dropout_p=dropout,
583
+ softmax_scale=softmax_scale,
584
+ causal=causal,
585
+ window_size=(self.config.sliding_window, self.config.sliding_window),
586
+ )
587
+
588
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
589
+ else:
590
+ if not use_sliding_windows:
591
+ attn_output = flash_attn_func(
592
+ query_states,
593
+ key_states,
594
+ value_states,
595
+ dropout,
596
+ softmax_scale=softmax_scale,
597
+ causal=causal,
598
+ )
599
+ else:
600
+ attn_output = flash_attn_func(
601
+ query_states,
602
+ key_states,
603
+ value_states,
604
+ dropout,
605
+ softmax_scale=softmax_scale,
606
+ causal=causal,
607
+ window_size=(self.config.sliding_window, self.config.sliding_window),
608
+ )
609
+
610
+ return attn_output
611
+
612
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralFlashAttention2._upad_input
613
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
614
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
615
+
616
+ # On the first iteration we need to properly re-create the padding mask
617
+ # by slicing it at the proper place
618
+ if kv_seq_len != attention_mask.shape[-1]:
619
+ attention_mask_num_tokens = attention_mask.shape[-1]
620
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
621
+
622
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
623
+
624
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
625
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
626
+
627
+ if query_length == kv_seq_len:
628
+ query_layer = index_first_axis(
629
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
630
+ )
631
+ cu_seqlens_q = cu_seqlens_k
632
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
633
+ indices_q = indices_k
634
+ elif query_length == 1:
635
+ max_seqlen_in_batch_q = 1
636
+ cu_seqlens_q = torch.arange(
637
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
638
+ ) # There is a memcpy here, that is very bad.
639
+ indices_q = cu_seqlens_q[:-1]
640
+ query_layer = query_layer.squeeze(1)
641
+ else:
642
+ # The -q_len: slice assumes left padding.
643
+ attention_mask = attention_mask[:, -query_length:]
644
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
645
+
646
+ return (
647
+ query_layer,
648
+ key_layer,
649
+ value_layer,
650
+ indices_q,
651
+ (cu_seqlens_q, cu_seqlens_k),
652
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
653
+ )
654
+
655
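`_upad_input` relies on unpadding metadata derived from the 2D padding mask: the flat indices of real tokens, cumulative sequence lengths, and the longest sequence in the batch. A self-contained sketch of that bookkeeping (mirroring what a `_get_unpad_data`-style helper returns, written out inline here as an assumption about its behavior):

import torch
import torch.nn.functional as F

# 1 = real token, 0 = padding (two sequences of lengths 3 and 2)
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.int32)

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)               # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()    # flat positions of real tokens
max_seqlen_in_batch = int(seqlens_in_batch.max())                              # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))  # tensor([0, 3, 5])

# flash_attn_varlen_func consumes the flattened (unpadded) tokens plus cu_seqlens / max_seqlen.
print(indices.tolist(), cu_seqlens.tolist(), max_seqlen_in_batch)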
+
656
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Jamba
657
+ class JambaSdpaAttention(JambaAttention):
658
+ """
659
+ Jamba attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
660
+ `JambaAttention` as the weights of the module stay untouched. The only changes are in the forward pass to adapt to
661
+ the SDPA API.
662
+ """
663
+
664
+ # Adapted from JambaAttention.forward
665
+ def forward(
666
+ self,
667
+ hidden_states: torch.Tensor,
668
+ attention_mask: Optional[torch.Tensor] = None,
669
+ position_ids: Optional[torch.LongTensor] = None,
670
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
671
+ output_attentions: bool = False,
672
+ use_cache: bool = False,
673
+ cache_position: Optional[torch.LongTensor] = None,
674
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
675
+ if output_attentions:
676
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
677
+ logger.warning_once(
678
+ "JambaModel is using JambaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
679
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
680
+ )
681
+ return super().forward(
682
+ hidden_states=hidden_states,
683
+ attention_mask=attention_mask,
684
+ position_ids=position_ids,
685
+ past_key_value=past_key_value,
686
+ output_attentions=output_attentions,
687
+ use_cache=use_cache,
688
+ )
689
+
690
+ bsz, q_len, _ = hidden_states.size()
691
+
692
+ query_states = self.q_proj(hidden_states)
693
+ key_states = self.k_proj(hidden_states)
694
+ value_states = self.v_proj(hidden_states)
695
+
696
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
697
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
698
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
699
+
700
+ if past_key_value is not None:
701
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
702
+
703
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
704
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
705
+
706
+ causal_mask = attention_mask
707
+ if attention_mask is not None:
708
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
709
+
710
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
711
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
712
+ if query_states.device.type == "cuda" and attention_mask is not None:
713
+ query_states = query_states.contiguous()
714
+ key_states = key_states.contiguous()
715
+ value_states = value_states.contiguous()
716
+
717
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
718
+ query_states,
719
+ key_states,
720
+ value_states,
721
+ attn_mask=causal_mask,
722
+ dropout_p=self.attention_dropout if self.training else 0.0,
723
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
724
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
725
+ )
726
+
727
+ attn_output = attn_output.transpose(1, 2).contiguous()
728
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
729
+
730
+ attn_output = self.o_proj(attn_output)
731
+
732
+ return attn_output, None, past_key_value
733
+
734
+
735
+ JAMBA_ATTENTION_CLASSES = {
736
+ "eager": JambaAttention,
737
+ "flash_attention_2": JambaFlashAttention2,
738
+ "sdpa": JambaSdpaAttention,
739
+ }
740
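This registry is how the decoder layers pick an attention backend from `config._attn_implementation`. A rough sketch of the dispatch pattern, with hypothetical stand-in classes rather than the real ones:

class EagerAttn: ...
class FlashAttn2(EagerAttn): ...
class SdpaAttn(EagerAttn): ...

ATTENTION_CLASSES = {"eager": EagerAttn, "flash_attention_2": FlashAttn2, "sdpa": SdpaAttn}

attn_implementation = "sdpa"                          # would normally come from config._attn_implementation
attention = ATTENTION_CLASSES[attn_implementation]()  # instantiate the selected backend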
+
741
+
742
+ # Adapted from transformers.models.mamba.modeling_mamba.MambaMixer
743
+ class JambaMambaMixer(nn.Module):
744
+ """
745
+ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
746
+ A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
747
+ ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
748
+ and is why Mamba is called **selective** state spaces)
749
+ """
750
+
751
+ def __init__(self, config: JambaConfig, layer_idx):
752
+ super().__init__()
753
+ self.config = config
754
+ self.layer_idx = layer_idx
755
+ self.hidden_size = config.hidden_size
756
+ self.ssm_state_size = config.mamba_d_state
757
+ self.conv_kernel_size = config.mamba_d_conv
758
+ self.intermediate_size = config.mamba_expand * config.hidden_size
759
+ self.time_step_rank = config.mamba_dt_rank
760
+ self.use_conv_bias = config.mamba_conv_bias
761
+ self.use_bias = config.mamba_proj_bias
762
+ self.conv1d = nn.Conv1d(
763
+ in_channels=self.intermediate_size,
764
+ out_channels=self.intermediate_size,
765
+ bias=self.use_conv_bias,
766
+ kernel_size=self.conv_kernel_size,
767
+ groups=self.intermediate_size,
768
+ padding=self.conv_kernel_size - 1,
769
+ )
770
+
771
+ self.activation = config.hidden_act
772
+ self.act = ACT2FN[config.hidden_act]
773
+
774
+ self.use_fast_kernels = config.use_mamba_kernels
775
+
776
+ # projection of the input hidden states
777
+ self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
778
+ # selective projection used to make dt, B and C input dependent
779
+ self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
780
+ # time step projection (discretization)
781
+ self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
782
+
783
+ # S4D real initialization. These are not discretized!
784
+ # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
785
+ A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
786
+ A = A.expand(self.intermediate_size, -1).contiguous()
787
+
788
+ self.A_log = nn.Parameter(torch.log(A))
789
+ self.D = nn.Parameter(torch.ones(self.intermediate_size))
790
+ self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
791
+
792
+ self.dt_layernorm = JambaRMSNorm(self.time_step_rank, eps=config.rms_norm_eps)
793
+ self.b_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
794
+ self.c_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
795
+
796
+ if not is_fast_path_available:
797
+ logger.warning_once(
798
+ "The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
799
+ " is None. To install follow https://github.com/state-spaces/mamba/#installation and"
800
+ " https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config"
801
+ )
802
+
803
+ def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: HybridMambaAttentionDynamicCache = None):
804
+ batch_size, seq_len, _ = hidden_states.shape
805
+ use_precomputed_states = (
806
+ cache_params is not None
807
+ and cache_params.has_previous_state
808
+ and seq_len == 1
809
+ and cache_params.conv_states[self.layer_idx].shape[0]
810
+ == cache_params.ssm_states[self.layer_idx].shape[0]
811
+ == batch_size
812
+ )
813
+ # 1. Gated MLP's linear projection
814
+ projected_states = self.in_proj(hidden_states).transpose(1, 2)
815
+
816
+ # We can't use `mamba_inner_fn` even in training without cache params, because we have the
817
+ # inner layernorms, which aren't supported by this fused kernel
818
+ hidden_states, gate = projected_states.chunk(2, dim=1)
819
+
820
+ # 2. Convolution sequence transformation
821
+ conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
822
+ if use_precomputed_states:
823
+ hidden_states = causal_conv1d_update(
824
+ hidden_states.squeeze(-1),
825
+ cache_params.conv_states[self.layer_idx],
826
+ conv_weights,
827
+ self.conv1d.bias,
828
+ self.activation,
829
+ )
830
+ hidden_states = hidden_states.unsqueeze(-1)
831
+ else:
832
+ if cache_params is not None:
833
+ conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
834
+ cache_params.conv_states[self.layer_idx].copy_(conv_states)
835
+ hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
836
+
837
+ # 3. State Space Model sequence transformation
838
+ # 3.a. input varying initialization of time_step, B and C
839
+ ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
840
+ time_step, B, C = torch.split(
841
+ ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
842
+ )
843
+
844
+ time_step = self.dt_layernorm(time_step)
845
+ B = self.b_layernorm(B)
846
+ C = self.c_layernorm(C)
847
+
848
+ # Here we need to apply dt_proj without the bias, as the bias is added in the selective scan kernel.
849
+ # This is a hack to apply dt_proj while still using the forward pass of `torch.nn.Linear`, which is needed
850
+ # in order to make quantization work. Quantization code replaces `torch.nn.Linear` layers with quantized
851
+ # linear layers, and requires to call the forward pass directly.
852
+ # The original code here was: ```discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)```
853
+ time_proj_bias = self.dt_proj.bias
854
+ self.dt_proj.bias = None
855
+ discrete_time_step = self.dt_proj(time_step).transpose(1, 2)
856
+ self.dt_proj.bias = time_proj_bias
857
+
858
+ A = -torch.exp(self.A_log.float())
859
+ # 3.c perform the recurrence y ← SSM(A, B, C)(x)
860
+ time_proj_bias = time_proj_bias.float() if time_proj_bias is not None else None
861
+ if use_precomputed_states:
862
+ scan_outputs = selective_state_update(
863
+ cache_params.ssm_states[self.layer_idx],
864
+ hidden_states[..., 0],
865
+ discrete_time_step[..., 0],
866
+ A,
867
+ B[:, 0],
868
+ C[:, 0],
869
+ self.D,
870
+ gate[..., 0],
871
+ time_proj_bias,
872
+ dt_softplus=True,
873
+ ).unsqueeze(-1)
874
+ else:
875
+ scan_outputs, ssm_state = selective_scan_fn(
876
+ hidden_states,
877
+ discrete_time_step,
878
+ A,
879
+ B.transpose(1, 2),
880
+ C.transpose(1, 2),
881
+ self.D.float(),
882
+ gate,
883
+ time_proj_bias,
884
+ delta_softplus=True,
885
+ return_last_state=True,
886
+ )
887
+ if ssm_state is not None and cache_params is not None:
888
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
889
+
890
+ # 4. Final linear projection
891
+ contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
892
+
893
+ return contextualized_states
894
+
895
+ # fmt: off
896
+ def slow_forward(self, input_states, cache_params: HybridMambaAttentionDynamicCache = None):
897
+ batch_size, seq_len, _ = input_states.shape
898
+ dtype = input_states.dtype
899
+ # 1. Gated MLP's linear projection
900
+ projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len]
901
+ hidden_states, gate = projected_states.chunk(2, dim=1)
902
+
903
+ use_cache = isinstance(cache_params, HybridMambaAttentionDynamicCache)
904
+ # 2. Convolution sequence transformation
905
+ if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size:
906
+ if self.training:
907
+ # In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass
908
+ ssm_state = cache_params.ssm_states[self.layer_idx].clone()
909
+ else:
910
+ ssm_state = cache_params.ssm_states[self.layer_idx]
911
+
912
+ if cache_params.has_previous_state and seq_len == 1 and \
913
+ cache_params.conv_states[self.layer_idx].shape[0] == batch_size:
914
+ conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
915
+ conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
916
+ conv_state[:, :, -1] = hidden_states[:, :, 0]
917
+ cache_params.conv_states[self.layer_idx] = conv_state
918
+ hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
919
+ if self.use_conv_bias:
920
+ hidden_states += self.conv1d.bias
921
+ hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) # [batch, intermediate_size, 1] : decoding
922
+ else:
923
+ conv_state = nn.functional.pad(
924
+ hidden_states,
925
+ (self.conv_kernel_size - hidden_states.shape[-1], 0)
926
+ )
927
+ cache_params.conv_states[self.layer_idx] = conv_state
928
+ hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
929
+ else:
930
+ ssm_state = torch.zeros(
931
+ (batch_size, self.intermediate_size, self.ssm_state_size),
932
+ device=hidden_states.device, dtype=dtype
933
+ )
934
+ hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
935
+
936
+ # 3. State Space Model sequence transformation
937
+ # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
938
+ ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
939
+ time_step, B, C = torch.split(
940
+ ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
941
+ )
942
+
943
+ time_step = self.dt_layernorm(time_step)
944
+ B = self.b_layernorm(B)
945
+ C = self.c_layernorm(C)
946
+
947
+ discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size]
948
+ discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) # [batch, intermediate_size, seq_len]
949
+
950
+ # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
951
+ A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size]
952
+ discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) # [batch, intermediate_size, seq_len, ssm_state_size]
953
+ discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() # [batch, intermediate_size, seq_len, ssm_state_size]
954
+ deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
955
+
956
+ # 3.c perform the recurrence y ← SSM(A, B, C)(x)
957
+ scan_outputs = []
958
+ for i in range(seq_len):
959
+ ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] # [batch, intermediate_size, ssm_state]
960
+ scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) # [batch, intermediate_size, 1]
961
+ scan_outputs.append(scan_output[:, :, 0])
962
+ scan_output = torch.stack(scan_outputs, dim=-1) # [batch, intermediate_size, seq_len]
963
+ scan_output = scan_output + (hidden_states * self.D[None, :, None])
964
+ scan_output = (scan_output * self.act(gate))
965
+
966
+ if use_cache:
967
+ cache_params.ssm_states[self.layer_idx] = ssm_state
968
+
969
+ # 4. Final linear projection
970
+ contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size]
971
+ return contextualized_states
972
+ # fmt: on
973
+
974
+ def forward(self, hidden_states, cache_params: HybridMambaAttentionDynamicCache = None):
975
+ if self.use_fast_kernels:
976
+ if not is_fast_path_available or "cuda" not in self.x_proj.weight.device.type:
977
+ raise ValueError(
978
+ "Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device"
979
+ )
980
+ return self.cuda_kernels_forward(hidden_states, cache_params)
981
+ return self.slow_forward(hidden_states, cache_params)
982
+
983
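Both paths of the mixer implement the same discretized selective-scan recurrence: per time step, state = exp(A * dt) * state + dt * B * x, and y_t = C_t · state + D * x_t (gating and the output projection are omitted here). A tiny standalone sketch with made-up sizes:

import torch

batch, d_inner, d_state, seq_len = 1, 4, 3, 6
x = torch.randn(batch, d_inner, seq_len)                       # post-convolution hidden states
dt = torch.nn.functional.softplus(torch.randn(batch, d_inner, seq_len))
A = -torch.rand(d_inner, d_state)                              # plays the role of -exp(A_log)
B = torch.randn(batch, seq_len, d_state)
C = torch.randn(batch, seq_len, d_state)
D = torch.ones(d_inner)

state = torch.zeros(batch, d_inner, d_state)
ys = []
for t in range(seq_len):
    dA = torch.exp(A[None] * dt[:, :, t, None])                # (batch, d_inner, d_state)
    dBx = dt[:, :, t, None] * B[:, None, t, :] * x[:, :, t, None]
    state = dA * state + dBx                                   # selective scan step
    ys.append((state @ C[:, t, :, None]).squeeze(-1))          # y_t = C_t · state
y = torch.stack(ys, dim=-1) + x * D[None, :, None]             # skip connection through D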
+
984
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Jamba
985
+ class JambaMLP(nn.Module):
986
+ def __init__(self, config):
987
+ super().__init__()
988
+ self.config = config
989
+ self.hidden_size = config.hidden_size
990
+ self.intermediate_size = config.intermediate_size
991
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
992
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
993
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
994
+ self.act_fn = ACT2FN[config.hidden_act]
995
+
996
+ def forward(self, x):
997
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
998
+
999
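The feed-forward block is a standard gated MLP: down_proj(act(gate_proj(x)) * up_proj(x)). A minimal functional sketch with toy dimensions (SiLU is assumed as the activation, which is typical for this family of models):

import torch
import torch.nn as nn

hidden_size, intermediate_size = 8, 16
gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
act = nn.SiLU()  # stand-in for ACT2FN[config.hidden_act]

x = torch.randn(2, 5, hidden_size)             # (batch, seq_len, hidden_size)
y = down_proj(act(gate_proj(x)) * up_proj(x))  # same shape as x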
+
1000
+ # Adapted from transformers.models.mixtral.modeling_mixtral.MixtralSparseMoeBlock with Mistral->Jamba
1001
+ class JambaSparseMoeBlock(nn.Module):
1002
+ """
1003
+ This implementation is
1004
+ strictly equivalent to standard MoE with full capacity (no
1005
+ dropped tokens). It's faster since it formulates MoE operations
1006
+ in terms of block-sparse operations to accommodate imbalanced
1007
+ assignments of tokens to experts, whereas standard MoE either
1008
+ (1) drops tokens at the cost of reduced performance or (2) sets
1009
+ the capacity factor to the number of experts and thus wastes
1010
+ computation and memory on padding.
1011
+ """
1012
+
1013
+ def __init__(self, config: JambaConfig):
1014
+ super().__init__()
1015
+ self.hidden_dim = config.hidden_size
1016
+ self.ffn_dim = config.intermediate_size
1017
+ self.num_experts = config.num_experts
1018
+ self.top_k = config.num_experts_per_tok
1019
+
1020
+ self.router = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
1021
+ self.experts = nn.ModuleList([JambaMLP(config) for _ in range(self.num_experts)])
1022
+
1023
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
1024
+ """ """
1025
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
1026
+
1027
+ hidden_states = hidden_states.view(-1, hidden_dim)
1028
+ # router_logits: (batch * sequence_length, n_experts)
1029
+ router_logits = self.router(hidden_states)
1030
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
1031
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
1032
+ # we cast back to the input dtype
1033
+ routing_weights = routing_weights.to(hidden_states.dtype)
1034
+
1035
+ final_hidden_states = torch.zeros(
1036
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
1037
+ )
1038
+
1039
+ # One hot encode the selected experts to create an expert mask
1040
+ # this will be used to easily index which expert is going to be solicited
1041
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
1042
+
1043
+ # Loop over all available experts in the model and perform the computation on each expert
1044
+ for expert_idx in range(self.num_experts):
1045
+ expert_layer = self.experts[expert_idx]
1046
+ idx, top_x = torch.where(expert_mask[expert_idx])
1047
+
1048
+ if top_x.shape[0] == 0:
1049
+ continue
1050
+
1051
+ # Index the correct hidden states and compute the expert hidden state for
1052
+ # the current expert. We need to make sure to multiply the output hidden
1053
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
1054
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
1055
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
1056
+
1057
+ # However `index_add_` only support torch tensors for indexing so we'll use
1058
+ # the `top_x` tensor here.
1059
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
1060
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
1061
+ return final_hidden_states, router_logits
1062
+
1063
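The routing above selects the top-k experts per token from a softmax over the router logits, runs each expert only on its assigned tokens, and scatters the weighted results back with `index_add_`. A compact standalone sketch of that bookkeeping (toy sizes, identity "experts"):

import torch
import torch.nn.functional as F

num_tokens, hidden_dim, num_experts, top_k = 6, 4, 3, 2
hidden_states = torch.randn(num_tokens, hidden_dim)
router_logits = torch.randn(num_tokens, num_experts)

routing_weights = F.softmax(router_logits, dim=-1)
routing_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)   # (tokens, top_k)

final = torch.zeros_like(hidden_states)
# expert_mask[e, k, t] == 1 when expert e is the k-th choice of token t
expert_mask = F.one_hot(selected_experts, num_classes=num_experts).permute(2, 1, 0)
for expert_idx in range(num_experts):
    idx, top_x = torch.where(expert_mask[expert_idx])   # ranks and token ids routed to this expert
    if top_x.numel() == 0:
        continue
    expert_out = hidden_states[top_x]                    # identity "expert", for illustration only
    final.index_add_(0, top_x, expert_out * routing_weights[top_x, idx, None])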
+
1064
+ class JambaAttentionDecoderLayer(nn.Module):
1065
+ def __init__(self, config: JambaConfig, layer_idx: int):
1066
+ super().__init__()
1067
+ num_experts = config.layers_num_experts[layer_idx]
1068
+ self.self_attn = JAMBA_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
1069
+
1070
+ ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP
1071
+ self.feed_forward = ffn_layer_class(config)
1072
+ self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1073
+ self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1074
+
1075
+ def forward(
1076
+ self,
1077
+ hidden_states: torch.Tensor,
1078
+ attention_mask: Optional[torch.Tensor] = None,
1079
+ position_ids: Optional[torch.LongTensor] = None,
1080
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
1081
+ output_attentions: Optional[bool] = False,
1082
+ output_router_logits: Optional[bool] = False,
1083
+ use_cache: Optional[bool] = False,
1084
+ cache_position: Optional[torch.LongTensor] = None,
1085
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1086
+ """
1087
+ Args:
1088
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1089
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
1090
+ `(batch, sequence_length)` where padding elements are indicated by 0.
1091
+ past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
1092
+ output_attentions (`bool`, *optional*):
1093
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1094
+ returned tensors for more detail.
1095
+ output_router_logits (`bool`, *optional*):
1096
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1097
+ should not be returned during inference.
1098
+ use_cache (`bool`, *optional*):
1099
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1100
+ (see `past_key_values`).
1101
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1102
+ Indices depicting the position of the input sequence tokens in the sequence.
1103
+ """
1104
+
1105
+ residual = hidden_states
1106
+
1107
+ hidden_states = self.input_layernorm(hidden_states)
1108
+
1109
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1110
+ hidden_states=hidden_states,
1111
+ attention_mask=attention_mask,
1112
+ position_ids=position_ids,
1113
+ past_key_value=past_key_value,
1114
+ output_attentions=output_attentions,
1115
+ use_cache=use_cache,
1116
+ cache_position=cache_position,
1117
+ )
1118
+
1119
+ # residual connection after attention
1120
+ hidden_states = residual + hidden_states
1121
+
1122
+ # feed-forward (experts/MLP)
1123
+ residual = hidden_states
1124
+ hidden_states = self.pre_ff_layernorm(hidden_states)
1125
+ ff_outputs = self.feed_forward(hidden_states)
1126
+ if isinstance(ff_outputs, tuple):
1127
+ hidden_states, router_logits = ff_outputs
1128
+ else:
1129
+ hidden_states, router_logits = ff_outputs, None
1130
+ hidden_states = residual + hidden_states
1131
+
1132
+ outputs = (hidden_states,)
1133
+
1134
+ if output_attentions:
1135
+ outputs += (self_attn_weights,)
1136
+
1137
+ if use_cache:
1138
+ outputs += (present_key_value,)
1139
+
1140
+ if output_router_logits:
1141
+ outputs += (router_logits,)
1142
+
1143
+ return outputs
1144
+
1145
+
1146
+ class JambaMambaDecoderLayer(nn.Module):
1147
+ def __init__(self, config: JambaConfig, layer_idx: int):
1148
+ super().__init__()
1149
+ num_experts = config.layers_num_experts[layer_idx]
1150
+ self.mamba = JambaMambaMixer(config=config, layer_idx=layer_idx)
1151
+
1152
+ ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP
1153
+ self.feed_forward = ffn_layer_class(config)
1154
+ self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1155
+ self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1156
+
1157
+ def forward(
1158
+ self,
1159
+ hidden_states: torch.Tensor,
1160
+ attention_mask: Optional[torch.Tensor] = None,
1161
+ position_ids: Optional[torch.LongTensor] = None,
1162
+ past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
1163
+ output_attentions: Optional[bool] = False,
1164
+ output_router_logits: Optional[bool] = False,
1165
+ use_cache: Optional[bool] = False,
1166
+ cache_position: Optional[torch.LongTensor] = None,
1167
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1168
+ """
1169
+ Args:
1170
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1171
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
1172
+ `(batch, sequence_length)` where padding elements are indicated by 0.
1173
+ past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
1174
+ output_attentions (`bool`, *optional*):
1175
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1176
+ returned tensors for more detail.
1177
+ output_router_logits (`bool`, *optional*):
1178
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1179
+ should not be returned during inference.
1180
+ use_cache (`bool`, *optional*):
1181
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1182
+ (see `past_key_values`).
1183
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1184
+ Indices depicting the position of the input sequence tokens in the sequence.
1185
+ """
1186
+
1187
+ residual = hidden_states
1188
+
1189
+ hidden_states = self.input_layernorm(hidden_states)
1190
+
1191
+ hidden_states = self.mamba(
1192
+ hidden_states=hidden_states,
1193
+ cache_params=past_key_value,
1194
+ )
1195
+ self_attn_weights = None
1196
+
1197
+ # residual connection after mamba
1198
+ hidden_states = residual + hidden_states
1199
+
1200
+ # feed-forward (experts/MLP)
1201
+ residual = hidden_states
1202
+ hidden_states = self.pre_ff_layernorm(hidden_states)
1203
+ ff_outputs = self.feed_forward(hidden_states)
1204
+ if isinstance(ff_outputs, tuple):
1205
+ hidden_states, router_logits = ff_outputs
1206
+ else:
1207
+ hidden_states, router_logits = ff_outputs, None
1208
+ hidden_states = residual + hidden_states
1209
+
1210
+ outputs = (hidden_states,)
1211
+
1212
+ if output_attentions:
1213
+ outputs += (self_attn_weights,)
1214
+
1215
+ if use_cache:
1216
+ outputs += (past_key_value,)
1217
+
1218
+ if output_router_logits:
1219
+ outputs += (router_logits,)
1220
+
1221
+ return outputs
1222
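Both decoder layer types above share the same pre-norm residual pattern: normalize, apply the token mixer (self-attention or the mamba mixer), add the residual, then repeat with the feed-forward block. Schematically, with stand-in callables instead of the real sub-modules:

import torch
import torch.nn as nn

hidden_size = 8
norm1, norm2 = nn.LayerNorm(hidden_size), nn.LayerNorm(hidden_size)  # RMSNorm in the real model
mixer = nn.Identity()          # stand-in for self-attention or JambaMambaMixer
feed_forward = nn.Identity()   # stand-in for JambaMLP / JambaSparseMoeBlock

x = torch.randn(2, 5, hidden_size)
x = x + mixer(norm1(x))           # token-mixing sub-block with residual
x = x + feed_forward(norm2(x))    # channel-mixing sub-block with residual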
+
1223
+
1224
+ JAMBA_START_DOCSTRING = r"""
1225
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1226
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1227
+ etc.)
1228
+
1229
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1230
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1231
+ and behavior.
1232
+
1233
+ Parameters:
1234
+ config ([`JambaConfig`]):
1235
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1236
+ load the weights associated with the model, only the configuration. Check out the
1237
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1238
+ """
1239
+
1240
+
1241
+ @add_start_docstrings(
1242
+ "The bare Jamba Model outputting raw hidden-states without any specific head on top.",
1243
+ JAMBA_START_DOCSTRING,
1244
+ )
1245
+ class JambaPreTrainedModel(PreTrainedModel):
1246
+ config_class = JambaConfig
1247
+ base_model_prefix = "model"
1248
+ supports_gradient_checkpointing = True
1249
+ _no_split_modules = ["JambaAttentionDecoderLayer", "JambaMambaDecoderLayer"]
1250
+ _skip_keys_device_placement = "past_key_values"
1251
+ _supports_flash_attn_2 = True
1252
+ _supports_sdpa = True
1253
+ _supports_cache_class = True
1254
+
1255
+ def _init_weights(self, module):
1256
+ std = self.config.initializer_range
1257
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
1258
+ module.weight.data.normal_(mean=0.0, std=std)
1259
+ if module.bias is not None:
1260
+ module.bias.data.zero_()
1261
+ elif isinstance(module, nn.Embedding):
1262
+ module.weight.data.normal_(mean=0.0, std=std)
1263
+ if module.padding_idx is not None:
1264
+ module.weight.data[module.padding_idx].zero_()
1265
+
1266
+
1267
+ JAMBA_INPUTS_DOCSTRING = r"""
1268
+ Args:
1269
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1270
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1271
+ it.
1272
+
1273
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1274
+ [`PreTrainedTokenizer.__call__`] for details.
1275
+
1276
+ [What are input IDs?](../glossary#input-ids)
1277
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1278
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1279
+
1280
+ - 1 for tokens that are **not masked**,
1281
+ - 0 for tokens that are **masked**.
1282
+
1283
+ [What are attention masks?](../glossary#attention-mask)
1284
+
1285
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1286
+ [`PreTrainedTokenizer.__call__`] for details.
1287
+
1288
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1289
+ `past_key_values`).
1290
+
1291
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1292
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1293
+ information on the default strategy.
1294
+
1295
+ - 1 indicates the head is **not masked**,
1296
+ - 0 indicates the head is **masked**.
1297
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1298
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1299
+ config.n_positions - 1]`.
1300
+
1301
+ [What are position IDs?](../glossary#position-ids)
1302
+ past_key_values (`HybridMambaAttentionDynamicCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1303
+ A HybridMambaAttentionDynamicCache object containing pre-computed hidden-states (keys and values in the
1304
+ self-attention blocks and convolution and ssm states in the mamba blocks) that can be used (see
1305
+ `past_key_values` input) to speed up sequential decoding.
1306
+ Key and value cache tensors have shape `(batch_size, num_heads, seq_len, head_dim)`.
1307
+ Convolution and ssm states tensors have shape `(batch_size, d_inner, d_conv)` and
1308
+ `(batch_size, d_inner, d_state)` respectively.
1309
+ See the `HybridMambaAttentionDynamicCache` class for more details.
1310
+
1311
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
1312
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1313
+ `input_ids` of shape `(batch_size, sequence_length)`.
1314
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1315
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1316
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1317
+ model's internal embedding lookup matrix.
1318
+ use_cache (`bool`, *optional*):
1319
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1320
+ `past_key_values`).
1321
+ output_attentions (`bool`, *optional*):
1322
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1323
+ tensors for more detail.
1324
+ output_hidden_states (`bool`, *optional*):
1325
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1326
+ more detail.
1327
+ output_router_logits (`bool`, *optional*):
1328
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1329
+ should not be returned during inference.
1330
+ return_dict (`bool`, *optional*):
1331
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1332
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1333
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
1334
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1335
+ the complete sequence length.
1336
+ """
1337
+
1338
+ ALL_DECODER_LAYER_TYPES = {"attention": JambaAttentionDecoderLayer, "mamba": JambaMambaDecoderLayer}
1339
+
1340
+
1341
+ @add_start_docstrings(
1342
+ "The bare Jamba Model outputting raw hidden-states without any specific head on top.",
1343
+ JAMBA_START_DOCSTRING,
1344
+ )
1345
+ # Adapted from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->JAMBA, Mistral->Jamba
1346
+ class JambaModel(JambaPreTrainedModel):
1347
+ """
1348
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`JambaDecoderLayer`]
1349
+
1350
+ Args:
1351
+ config: JambaConfig
1352
+ """
1353
+
1354
+ def __init__(self, config: JambaConfig):
1355
+ super().__init__(config)
1356
+ self.padding_idx = config.pad_token_id
1357
+ self.vocab_size = config.vocab_size
1358
+
1359
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1360
+ decoder_layers = []
1361
+ for i in range(config.num_hidden_layers):
1362
+ layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]]
1363
+ decoder_layers.append(layer_class(config, layer_idx=i))
1364
+ self.layers = nn.ModuleList(decoder_layers)
1365
+
1366
+ self._attn_implementation = config._attn_implementation
1367
+ self.final_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1368
+
1369
+ self.gradient_checkpointing = False
1370
+ # Initialize weights and apply final processing
1371
+ self.post_init()
1372
+
1373
+ def get_input_embeddings(self):
1374
+ return self.embed_tokens
1375
+
1376
+ def set_input_embeddings(self, value):
1377
+ self.embed_tokens = value
1378
+
1379
+ @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
1380
+ def forward(
1381
+ self,
1382
+ input_ids: torch.LongTensor = None,
1383
+ attention_mask: Optional[torch.Tensor] = None,
1384
+ position_ids: Optional[torch.LongTensor] = None,
1385
+ past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
1386
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1387
+ use_cache: Optional[bool] = None,
1388
+ output_attentions: Optional[bool] = None,
1389
+ output_hidden_states: Optional[bool] = None,
1390
+ output_router_logits: Optional[bool] = None,
1391
+ return_dict: Optional[bool] = None,
1392
+ cache_position: Optional[torch.LongTensor] = None,
1393
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
1394
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1395
+ output_router_logits = (
1396
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1397
+ )
1398
+ output_hidden_states = (
1399
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1400
+ )
1401
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1402
+
1403
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1404
+
1405
+ if (input_ids is None) ^ (inputs_embeds is not None):
1406
+ raise ValueError(
1407
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
1408
+ )
1409
+
1410
+ if self.gradient_checkpointing and self.training and use_cache:
1411
+ logger.warning_once(
1412
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1413
+ )
1414
+ use_cache = False
1415
+
1416
+ if inputs_embeds is None:
1417
+ inputs_embeds = self.embed_tokens(input_ids)
1418
+ hidden_states = inputs_embeds
1419
+
1420
+ if use_cache and past_key_values is None:
1421
+ logger.warning_once(
1422
+ "Jamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was "
1423
+ "provided, so no cache will be returned."
1424
+ )
1425
+
1426
+ if cache_position is None:
1427
+ cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
1428
+ if position_ids is None:
1429
+ position_ids = cache_position.unsqueeze(0)
1430
+
1431
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
1432
+
1433
+ all_hidden_states = () if output_hidden_states else None
1434
+ all_self_attns = () if output_attentions else None
1435
+ all_router_logits = () if output_router_logits else None
1436
+
1437
+ for decoder_layer in self.layers:
1438
+ if output_hidden_states:
1439
+ all_hidden_states += (hidden_states,)
1440
+
1441
+ if self.gradient_checkpointing and self.training:
1442
+ layer_outputs = self._gradient_checkpointing_func(
1443
+ decoder_layer.__call__,
1444
+ hidden_states,
1445
+ causal_mask,
1446
+ position_ids,
1447
+ past_key_values,
1448
+ output_attentions,
1449
+ output_router_logits,
1450
+ use_cache,
1451
+ cache_position,
1452
+ )
1453
+ else:
1454
+ layer_outputs = decoder_layer(
1455
+ hidden_states,
1456
+ attention_mask=causal_mask,
1457
+ position_ids=position_ids,
1458
+ past_key_value=past_key_values,
1459
+ output_attentions=output_attentions,
1460
+ output_router_logits=output_router_logits,
1461
+ use_cache=use_cache,
1462
+ cache_position=cache_position,
1463
+ )
1464
+
1465
+ hidden_states = layer_outputs[0]
1466
+
1467
+ if output_attentions:
1468
+ if layer_outputs[1] is not None:
1469
+ # append attentions only of attention layers. Mamba layers return `None` as the attention weights
1470
+ all_self_attns += (layer_outputs[1],)
1471
+
1472
+ if output_router_logits:
1473
+ if layer_outputs[-1] is not None:
1474
+ # append router logits only of expert layers. Regular MLP layers return `None` as the router logits
1475
+ all_router_logits += (layer_outputs[-1],)
1476
+
1477
+ hidden_states = self.final_layernorm(hidden_states)
1478
+
1479
+ # add hidden states from the last decoder layer
1480
+ if output_hidden_states:
1481
+ all_hidden_states += (hidden_states,)
1482
+
1483
+ if past_key_values and not past_key_values.has_previous_state:
1484
+ past_key_values.has_previous_state = True
1485
+
1486
+ next_cache = None if not use_cache else past_key_values
1487
+
1488
+ if not return_dict:
1489
+ return tuple(
1490
+ v
1491
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1492
+ if v is not None
1493
+ )
1494
+ return MoeModelOutputWithPast(
1495
+ last_hidden_state=hidden_states,
1496
+ past_key_values=next_cache,
1497
+ hidden_states=all_hidden_states,
1498
+ attentions=all_self_attns,
1499
+ router_logits=all_router_logits,
1500
+ )
1501
+
1502
+ def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
1503
+ if self.config._attn_implementation == "flash_attention_2":
1504
+ if attention_mask is not None and 0.0 in attention_mask:
1505
+ return attention_mask
1506
+ return None
1507
+
1508
+ dtype, device = input_tensor.dtype, input_tensor.device
1509
+ min_dtype = torch.finfo(dtype).min
1510
+ sequence_length = input_tensor.shape[1]
1511
+ target_length = cache_position[-1] + 1
1512
+
1513
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1514
+ if sequence_length != 1:
1515
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1516
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1517
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1518
+ if attention_mask is not None:
1519
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1520
+ if attention_mask.dim() == 2:
1521
+ mask_length = attention_mask.shape[-1]
1522
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1523
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1524
+
1525
+ if (
1526
+ self.config._attn_implementation == "sdpa"
1527
+ and attention_mask is not None
1528
+ and attention_mask.device.type == "cuda"
1529
+ ):
1530
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1531
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1532
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1533
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1534
+
1535
+ return causal_mask
1536
+
1537
+
1538
+ # Adapted from transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM with MIXTRAL->JAMBA, Mixtral->Jamba
1539
+ class JambaForCausalLM(JambaPreTrainedModel):
1540
+ _tied_weights_keys = ["lm_head.weight"]
1541
+
1542
+ def __init__(self, config: JambaConfig):
1543
+ super().__init__(config)
1544
+ self.model = JambaModel(config)
1545
+ self.vocab_size = config.vocab_size
1546
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1547
+ self.router_aux_loss_coef = config.router_aux_loss_coef
1548
+ self.num_experts = config.num_experts
1549
+ self.num_experts_per_tok = config.num_experts_per_tok
1550
+ # Initialize weights and apply final processing
1551
+ self.post_init()
1552
+
1553
+ def get_input_embeddings(self):
1554
+ return self.model.embed_tokens
1555
+
1556
+ def set_input_embeddings(self, value):
1557
+ self.model.embed_tokens = value
1558
+
1559
+ def get_output_embeddings(self):
1560
+ return self.lm_head
1561
+
1562
+ def set_output_embeddings(self, new_embeddings):
1563
+ self.lm_head = new_embeddings
1564
+
1565
+ def set_decoder(self, decoder):
1566
+ self.model = decoder
1567
+
1568
+ def get_decoder(self):
1569
+ return self.model
1570
+
1571
+ @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
1572
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1573
+ # Ignore copy
1574
+ def forward(
1575
+ self,
1576
+ input_ids: torch.LongTensor = None,
1577
+ attention_mask: Optional[torch.Tensor] = None,
1578
+ position_ids: Optional[torch.LongTensor] = None,
1579
+ past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
1580
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1581
+ labels: Optional[torch.LongTensor] = None,
1582
+ use_cache: Optional[bool] = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ output_router_logits: Optional[bool] = None,
1586
+ return_dict: Optional[bool] = None,
1587
+ cache_position: Optional[torch.LongTensor] = None,
1588
+ num_logits_to_keep: Optional[Union[int, None]] = None,
1589
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1590
+ r"""
1591
+ Args:
1592
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1593
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1594
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1595
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1596
+
1597
+ num_logits_to_keep (`int` or `None`, *optional*):
1598
+ Calculate logits for the last `num_logits_to_keep` tokens. If `None`, calculate logits for all
1599
+ `input_ids`. Only last token logits are needed for generation, and calculating them only for that token
1600
+ can save memory, which becomes pretty significant for long sequences.
1601
+
1602
+ Returns:
1603
+
1604
+ Example:
1605
+
1606
+ ```python
1607
+ >>> from transformers import AutoTokenizer, JambaForCausalLM
1608
+
1609
+ >>> model = JambaForCausalLM.from_pretrained("ai21labs/Jamba-v0.1")
1610
+ >>> tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1")
1611
+
1612
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1613
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1614
+
1615
+ >>> # Generate
1616
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1617
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1618
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1619
+ ```"""
1620
+
1621
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1622
+ output_router_logits = (
1623
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1624
+ )
1625
+
1626
+ output_hidden_states = (
1627
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1628
+ )
1629
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1630
+
1631
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1632
+ outputs = self.model(
1633
+ input_ids=input_ids,
1634
+ attention_mask=attention_mask,
1635
+ position_ids=position_ids,
1636
+ past_key_values=past_key_values,
1637
+ inputs_embeds=inputs_embeds,
1638
+ use_cache=use_cache,
1639
+ output_attentions=output_attentions,
1640
+ output_hidden_states=output_hidden_states,
1641
+ output_router_logits=output_router_logits,
1642
+ cache_position=cache_position,
1643
+ return_dict=return_dict,
1644
+ )
1645
+
1646
+ hidden_states = outputs[0]
1647
+ if num_logits_to_keep is None:
1648
+ logits = self.lm_head(hidden_states)
1649
+ else:
1650
+ logits = self.lm_head(hidden_states[..., -num_logits_to_keep:, :])
1651
+ logits = logits.float()
1652
+
1653
+ loss = None
1654
+ if labels is not None:
1655
+ # Shift so that tokens < n predict n
1656
+ shift_logits = logits[..., :-1, :].contiguous()
1657
+ shift_labels = labels[..., 1:].contiguous()
1658
+ # Flatten the tokens
1659
+ loss_fct = CrossEntropyLoss()
1660
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1661
+ shift_labels = shift_labels.view(-1)
1662
+ # Enable model parallelism
1663
+ shift_labels = shift_labels.to(shift_logits.device)
1664
+ loss = loss_fct(shift_logits, shift_labels)
1665
+
1666
+ aux_loss = None
1667
+ if output_router_logits:
1668
+ aux_loss = load_balancing_loss_func(
1669
+ outputs.router_logits if return_dict else outputs[-1],
1670
+ self.num_experts,
1671
+ self.num_experts_per_tok,
1672
+ attention_mask,
1673
+ )
1674
+ if labels is not None:
1675
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
1676
+
1677
+ if not return_dict:
1678
+ output = (logits,) + outputs[1:]
1679
+ if output_router_logits:
1680
+ output = (aux_loss,) + output
1681
+ return (loss,) + output if loss is not None else output
1682
+
1683
+ return MoeCausalLMOutputWithPast(
1684
+ loss=loss,
1685
+ aux_loss=aux_loss,
1686
+ logits=logits,
1687
+ past_key_values=outputs.past_key_values,
1688
+ hidden_states=outputs.hidden_states,
1689
+ attentions=outputs.attentions,
1690
+ router_logits=outputs.router_logits,
1691
+ )
1692
+
1693
+ def prepare_inputs_for_generation(
1694
+ self,
1695
+ input_ids,
1696
+ past_key_values=None,
1697
+ attention_mask=None,
1698
+ inputs_embeds=None,
1699
+ output_router_logits=False,
1700
+ cache_position=None,
1701
+ **kwargs,
1702
+ ):
1703
+ empty_past_kv = past_key_values is None
1704
+
1705
+ # Omit tokens covered by past_key_values
1706
+ if not empty_past_kv:
1707
+ past_length = cache_position[0] if cache_position is not None else attention_mask.shape[1]
1708
+ max_cache_length = self.config.sliding_window
1709
+ # Keep only the unprocessed tokens:
1710
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1711
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1712
+ # input)
1713
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1714
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1715
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1716
+ # input_ids based on the past_length.
1717
+ elif past_length < input_ids.shape[1]:
1718
+ input_ids = input_ids[:, past_length:]
1719
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1720
+
1721
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1722
+ if (
1723
+ max_cache_length is not None
1724
+ and attention_mask is not None
1725
+ and past_length + input_ids.shape[1] > max_cache_length
1726
+ ):
1727
+ attention_mask = attention_mask[:, -max_cache_length:]
1728
+ else:
1729
+ past_key_values = HybridMambaAttentionDynamicCache(
1730
+ self.config, input_ids.shape[0], self.dtype, device=self.device
1731
+ )
1732
+
1733
+ position_ids = kwargs.get("position_ids", None)
1734
+ if attention_mask is not None and position_ids is None:
1735
+ # create position_ids on the fly for batch generation
1736
+ position_ids = attention_mask.long().cumsum(-1) - 1
1737
+ position_ids.masked_fill_(attention_mask == 0, 1)
1738
+ if not empty_past_kv:
1739
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1740
+
1741
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1742
+ if inputs_embeds is not None and empty_past_kv:
1743
+ model_inputs = {"inputs_embeds": inputs_embeds}
1744
+ else:
1745
+ model_inputs = {"input_ids": input_ids}
1746
+
1747
+ model_inputs.update(
1748
+ {
1749
+ "position_ids": position_ids,
1750
+ "past_key_values": past_key_values,
1751
+ "use_cache": kwargs.get("use_cache"),
1752
+ "attention_mask": attention_mask,
1753
+ "output_router_logits": output_router_logits,
1754
+ "num_logits_to_keep": self.config.num_logits_to_keep,
1755
+ "cache_position": cache_position,
1756
+ }
1757
+ )
1758
+ return model_inputs
1759
+
1760
+
1761
+ @add_start_docstrings(
1762
+ """
1763
+ The Jamba Model with a sequence classification head on top (linear layer).
1764
+
1765
+ [`JambaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1766
+ (e.g. GPT-2) do.
1767
+
1768
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1769
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1770
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1771
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1772
+ each row of the batch).
1773
+ """,
1774
+ JAMBA_START_DOCSTRING,
1775
+ )
1776
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralForSequenceClassification with Mixtral->Jamba, MIXTRAL->JAMBA
1777
+ class JambaForSequenceClassification(JambaPreTrainedModel):
1778
+ def __init__(self, config):
1779
+ super().__init__(config)
1780
+ self.num_labels = config.num_labels
1781
+ self.model = JambaModel(config)
1782
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1783
+
1784
+ # Initialize weights and apply final processing
1785
+ self.post_init()
1786
+
1787
+ def get_input_embeddings(self):
1788
+ return self.model.embed_tokens
1789
+
1790
+ def set_input_embeddings(self, value):
1791
+ self.model.embed_tokens = value
1792
+
1793
+ @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
1794
+ def forward(
1795
+ self,
1796
+ input_ids: torch.LongTensor = None,
1797
+ attention_mask: Optional[torch.Tensor] = None,
1798
+ position_ids: Optional[torch.LongTensor] = None,
1799
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1800
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1801
+ labels: Optional[torch.LongTensor] = None,
1802
+ use_cache: Optional[bool] = None,
1803
+ output_attentions: Optional[bool] = None,
1804
+ output_hidden_states: Optional[bool] = None,
1805
+ return_dict: Optional[bool] = None,
1806
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1807
+ r"""
1808
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1809
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1810
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1811
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1812
+ """
1813
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1814
+
1815
+ transformer_outputs = self.model(
1816
+ input_ids,
1817
+ attention_mask=attention_mask,
1818
+ position_ids=position_ids,
1819
+ past_key_values=past_key_values,
1820
+ inputs_embeds=inputs_embeds,
1821
+ use_cache=use_cache,
1822
+ output_attentions=output_attentions,
1823
+ output_hidden_states=output_hidden_states,
1824
+ return_dict=return_dict,
1825
+ )
1826
+ hidden_states = transformer_outputs[0]
1827
+ logits = self.score(hidden_states)
1828
+
1829
+ if input_ids is not None:
1830
+ batch_size = input_ids.shape[0]
1831
+ else:
1832
+ batch_size = inputs_embeds.shape[0]
1833
+
1834
+ if self.config.pad_token_id is None and batch_size != 1:
1835
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1836
+ if self.config.pad_token_id is None:
1837
+ sequence_lengths = -1
1838
+ else:
1839
+ if input_ids is not None:
1840
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1841
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1842
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1843
+ sequence_lengths = sequence_lengths.to(logits.device)
1844
+ else:
1845
+ sequence_lengths = -1
1846
+
1847
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1848
+
1849
+ loss = None
1850
+ if labels is not None:
1851
+ labels = labels.to(logits.device)
1852
+ if self.config.problem_type is None:
1853
+ if self.num_labels == 1:
1854
+ self.config.problem_type = "regression"
1855
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1856
+ self.config.problem_type = "single_label_classification"
1857
+ else:
1858
+ self.config.problem_type = "multi_label_classification"
1859
+
1860
+ if self.config.problem_type == "regression":
1861
+ loss_fct = MSELoss()
1862
+ if self.num_labels == 1:
1863
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1864
+ else:
1865
+ loss = loss_fct(pooled_logits, labels)
1866
+ elif self.config.problem_type == "single_label_classification":
1867
+ loss_fct = CrossEntropyLoss()
1868
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1869
+ elif self.config.problem_type == "multi_label_classification":
1870
+ loss_fct = BCEWithLogitsLoss()
1871
+ loss = loss_fct(pooled_logits, labels)
1872
+ if not return_dict:
1873
+ output = (pooled_logits,) + transformer_outputs[1:]
1874
+ return ((loss,) + output) if loss is not None else output
1875
+
1876
+ return SequenceClassifierOutputWithPast(
1877
+ loss=loss,
1878
+ logits=pooled_logits,
1879
+ past_key_values=transformer_outputs.past_key_values,
1880
+ hidden_states=transformer_outputs.hidden_states,
1881
+ attentions=transformer_outputs.attentions,
1882
+ )
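The last-token pooling in `JambaForSequenceClassification.forward` above (locate the last non-padding position per row via the pad token, with a modulo trick kept for ONNX compatibility) can be illustrated in isolation. A minimal sketch with toy tensors and a made-up number of labels, independent of any checkpoint:

```python
import torch

# Toy batch: pad_token_id = 0, real tokens are non-zero (hypothetical values).
pad_token_id = 0
input_ids = torch.tensor(
    [
        [5, 6, 7, 0, 0],  # last real token at index 2
        [8, 9, 1, 2, 3],  # no padding -> last token at index 4
    ]
)
batch_size, seq_len = input_ids.shape
logits = torch.randn(batch_size, seq_len, 3)  # stand-in for self.score(hidden_states)

# Same trick as in the forward above: argmax over the pad mask gives the first pad
# position; subtracting 1 gives the last real token. The modulo keeps the index
# valid (wraps -1 to seq_len - 1) when a row contains no padding at all.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % seq_len

pooled_logits = logits[torch.arange(batch_size), sequence_lengths]
print(sequence_lengths.tolist())  # [2, 4]
print(pooled_logits.shape)        # torch.Size([2, 3])
```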
llmeval-env/lib/python3.10/site-packages/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,170 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert LUKE checkpoint."""
16
+
17
+ import argparse
18
+ import json
19
+ import os
20
+
21
+ import torch
22
+
23
+ from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
24
+ from transformers.tokenization_utils_base import AddedToken
25
+
26
+
27
+ @torch.no_grad()
28
+ def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
29
+ # Load configuration defined in the metadata file
30
+ with open(metadata_path) as metadata_file:
31
+ metadata = json.load(metadata_file)
32
+ config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
33
+
34
+ # Load in the weights from the checkpoint_path
35
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
36
+
37
+ # Load the entity vocab file
38
+ entity_vocab = load_entity_vocab(entity_vocab_path)
39
+
40
+ tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
41
+
42
+ # Add special tokens to the token vocabulary for downstream tasks
43
+ entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
44
+ entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
45
+ tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
46
+ config.vocab_size += 2
47
+
48
+ print(f"Saving tokenizer to {pytorch_dump_folder_path}")
49
+ tokenizer.save_pretrained(pytorch_dump_folder_path)
50
+ with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
51
+ json.dump(entity_vocab, f)
52
+
53
+ tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
54
+
55
+ # Initialize the embeddings of the special tokens
56
+ word_emb = state_dict["embeddings.word_embeddings.weight"]
57
+ ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
58
+ ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
59
+ state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
60
+
61
+ # Initialize the query layers of the entity-aware self-attention mechanism
62
+ for layer_index in range(config.num_hidden_layers):
63
+ for matrix_name in ["query.weight", "query.bias"]:
64
+ prefix = f"encoder.layer.{layer_index}.attention.self."
65
+ state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
66
+ state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
67
+ state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
68
+
69
+ # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
70
+ entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
71
+ entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
72
+
73
+ model = LukeModel(config=config).eval()
74
+
75
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
76
+ if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
77
+ raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
78
+ if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
79
+ raise ValueError(
80
+ "Unexpected keys"
81
+ f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
82
+ )
83
+
84
+ # Check outputs
85
+ tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
86
+
87
+ text = (
88
+ "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
89
+ " new world number one avoid a humiliating second- round exit at Wimbledon ."
90
+ )
91
+ span = (39, 42)
92
+ encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
93
+
94
+ outputs = model(**encoding)
95
+
96
+ # Verify word hidden states
97
+ if model_size == "large":
98
+ expected_shape = torch.Size((1, 42, 1024))
99
+ expected_slice = torch.tensor(
100
+ [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
101
+ )
102
+ else: # base
103
+ expected_shape = torch.Size((1, 42, 768))
104
+ expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
105
+
106
+ if not (outputs.last_hidden_state.shape == expected_shape):
107
+ raise ValueError(
108
+ f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
109
+ )
110
+ if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
111
+ raise ValueError
112
+
113
+ # Verify entity hidden states
114
+ if model_size == "large":
115
+ expected_shape = torch.Size((1, 1, 1024))
116
+ expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
117
+ else: # base
118
+ expected_shape = torch.Size((1, 1, 768))
119
+ expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
120
+
121
+ if not (outputs.entity_last_hidden_state.shape == expected_shape):
122
+ raise ValueError(
123
+ f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
124
+ f" {expected_shape}"
125
+ )
126
+ if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
127
+ raise ValueError
128
+
129
+ # Finally, save our PyTorch model and tokenizer
130
+ print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
131
+ model.save_pretrained(pytorch_dump_folder_path)
132
+
133
+
134
+ def load_entity_vocab(entity_vocab_path):
135
+ entity_vocab = {}
136
+ with open(entity_vocab_path, "r", encoding="utf-8") as f:
137
+ for index, line in enumerate(f):
138
+ title, _ = line.rstrip().split("\t")
139
+ entity_vocab[title] = index
140
+
141
+ return entity_vocab
142
+
143
+
144
+ if __name__ == "__main__":
145
+ parser = argparse.ArgumentParser()
146
+ # Required parameters
147
+ parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
148
+ parser.add_argument(
149
+ "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
150
+ )
151
+ parser.add_argument(
152
+ "--entity_vocab_path",
153
+ default=None,
154
+ type=str,
155
+ help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
156
+ )
157
+ parser.add_argument(
158
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
159
+ )
160
+ parser.add_argument(
161
+ "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
162
+ )
163
+ args = parser.parse_args()
164
+ convert_luke_checkpoint(
165
+ args.checkpoint_path,
166
+ args.metadata_path,
167
+ args.entity_vocab_path,
168
+ args.pytorch_dump_folder_path,
169
+ args.model_size,
170
+ )
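For reference, the conversion entry point above can also be driven directly from Python instead of the argparse CLI. A hedged sketch: the paths below are hypothetical placeholders for files from the original LUKE release (checkpoint, metadata.json, entity_vocab.tsv), not files shipped in this repository.

```python
from transformers.models.luke.convert_luke_original_pytorch_checkpoint_to_pytorch import (
    convert_luke_checkpoint,
)

# Hypothetical local paths to the original LUKE "base" release artifacts.
convert_luke_checkpoint(
    checkpoint_path="luke_base/pytorch_model.bin",
    metadata_path="luke_base/metadata.json",
    entity_vocab_path="luke_base/entity_vocab.tsv",
    pytorch_dump_folder_path="converted_luke_base",
    model_size="base",
)
```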
llmeval-env/lib/python3.10/site-packages/transformers/models/luke/modeling_luke.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nystromformer/configuration_nystromformer.py ADDED
@@ -0,0 +1,132 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 UW-Madison and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Nystromformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class NystromformerConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate
30
+ an Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the Nystromformer
32
+ [uw-madison/nystromformer-512](https://huggingface.co/uw-madison/nystromformer-512) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 30000):
39
+ Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented
40
+ by the `inputs_ids` passed when calling [`NystromformerModel`].
41
+ hidden_size (`int`, *optional*, defaults to 768):
42
+ Dimension of the encoder layers and the pooler layer.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ intermediate_size (`int`, *optional*, defaults to 3072):
48
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
49
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
50
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
51
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
52
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
53
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
54
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
55
+ The dropout ratio for the attention probabilities.
56
+ max_position_embeddings (`int`, *optional*, defaults to 512):
57
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
58
+ just in case (e.g., 512 or 1024 or 2048).
59
+ type_vocab_size (`int`, *optional*, defaults to 2):
60
+ The vocabulary size of the `token_type_ids` passed when calling [`NystromformerModel`].
61
+ segment_means_seq_len (`int`, *optional*, defaults to 64):
62
+ Sequence length used in segment-means.
63
+ num_landmarks (`int`, *optional*, defaults to 64):
64
+ The number of landmark (or Nystrom) points to use in Nystrom approximation of the softmax self-attention
65
+ matrix.
66
+ conv_kernel_size (`int`, *optional*, defaults to 65):
67
+ The kernel size of depthwise convolution used in Nystrom approximation.
68
+ inv_coeff_init_option (`bool`, *optional*, defaults to `False`):
69
+ Whether or not to use exact coefficient computation for the initial values for the iterative method of
70
+ calculating the Moore-Penrose inverse of a matrix.
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
74
+ The epsilon used by the layer normalization layers.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import NystromformerModel, NystromformerConfig
80
+
81
+ >>> # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration
82
+ >>> configuration = NystromformerConfig()
83
+
84
+ >>> # Initializing a model from the uw-madison/nystromformer-512 style configuration
85
+ >>> model = NystromformerModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "nystromformer"
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_size=30000,
96
+ hidden_size=768,
97
+ num_hidden_layers=12,
98
+ num_attention_heads=12,
99
+ intermediate_size=3072,
100
+ hidden_act="gelu_new",
101
+ hidden_dropout_prob=0.1,
102
+ attention_probs_dropout_prob=0.1,
103
+ max_position_embeddings=510,
104
+ type_vocab_size=2,
105
+ segment_means_seq_len=64,
106
+ num_landmarks=64,
107
+ conv_kernel_size=65,
108
+ inv_coeff_init_option=False,
109
+ initializer_range=0.02,
110
+ layer_norm_eps=1e-5,
111
+ pad_token_id=1,
112
+ bos_token_id=0,
113
+ eos_token_id=2,
114
+ **kwargs,
115
+ ):
116
+ self.vocab_size = vocab_size
117
+ self.max_position_embeddings = max_position_embeddings
118
+ self.hidden_size = hidden_size
119
+ self.num_hidden_layers = num_hidden_layers
120
+ self.num_attention_heads = num_attention_heads
121
+ self.intermediate_size = intermediate_size
122
+ self.hidden_act = hidden_act
123
+ self.hidden_dropout_prob = hidden_dropout_prob
124
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
125
+ self.initializer_range = initializer_range
126
+ self.type_vocab_size = type_vocab_size
127
+ self.segment_means_seq_len = segment_means_seq_len
128
+ self.num_landmarks = num_landmarks
129
+ self.conv_kernel_size = conv_kernel_size
130
+ self.inv_coeff_init_option = inv_coeff_init_option
131
+ self.layer_norm_eps = layer_norm_eps
132
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
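As a small illustration of the Nystrom-specific knobs documented above (`num_landmarks`, `segment_means_seq_len`, `conv_kernel_size`), here is a hedged sketch of a reduced configuration; the sizes are made up for a toy model and do not correspond to any released checkpoint.

```python
from transformers import NystromformerConfig, NystromformerModel

# Hypothetical small variant: fewer landmark points and a shorter segment-means
# sequence length than the uw-madison/nystromformer-512 defaults shown above.
config = NystromformerConfig(
    hidden_size=256,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=1024,
    num_landmarks=32,
    segment_means_seq_len=32,
    conv_kernel_size=33,
)
model = NystromformerModel(config)
print(sum(p.numel() for p in model.parameters()))  # rough parameter count of the toy model
```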
llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import _LazyModule
18
+
19
+
20
+ _import_structure = {"tokenization_phobert": ["PhobertTokenizer"]}
21
+
22
+
23
+ if TYPE_CHECKING:
24
+ from .tokenization_phobert import PhobertTokenizer
25
+
26
+ else:
27
+ import sys
28
+
29
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (515 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py ADDED
@@ -0,0 +1,349 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
3
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Tokenization classes for PhoBERT"""
17
+
18
+
19
+ import os
20
+ import re
21
+ from shutil import copyfile
22
+ from typing import List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.txt",
32
+ "merges_file": "bpe.codes",
33
+ }
34
+
35
+
36
+ def get_pairs(word):
37
+ """
38
+ Return set of symbol pairs in a word.
39
+
40
+ Word is represented as tuple of symbols (symbols being variable-length strings).
41
+ """
42
+ pairs = set()
43
+ prev_char = word[0]
44
+ for char in word[1:]:
45
+ pairs.add((prev_char, char))
46
+ prev_char = char
47
+
48
+ pairs = set(pairs)
49
+ return pairs
50
+
51
+
52
+ class PhobertTokenizer(PreTrainedTokenizer):
53
+ """
54
+ Construct a PhoBERT tokenizer. Based on Byte-Pair-Encoding.
55
+
56
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
57
+ this superclass for more information regarding those methods.
58
+
59
+ Args:
60
+ vocab_file (`str`):
61
+ Path to the vocabulary file.
62
+ merges_file (`str`):
63
+ Path to the merges file.
64
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
65
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
66
+
67
+ <Tip>
68
+
69
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
70
+ sequence. The token used is the `cls_token`.
71
+
72
+ </Tip>
73
+
74
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
75
+ The end of sequence token.
76
+
77
+ <Tip>
78
+
79
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
80
+ The token used is the `sep_token`.
81
+
82
+ </Tip>
83
+
84
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
85
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
86
+ sequence classification or for a text and a question for question answering. It is also used as the last
87
+ token of a sequence built with special tokens.
88
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
89
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
90
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
91
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
92
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
93
+ token instead.
94
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
95
+ The token used for padding, for example when batching sequences of different lengths.
96
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
97
+ The token used for masking values. This is the token used when training this model with masked language
98
+ modeling. This is the token which the model will try to predict.
99
+ """
100
+
101
+ vocab_files_names = VOCAB_FILES_NAMES
102
+
103
+ def __init__(
104
+ self,
105
+ vocab_file,
106
+ merges_file,
107
+ bos_token="<s>",
108
+ eos_token="</s>",
109
+ sep_token="</s>",
110
+ cls_token="<s>",
111
+ unk_token="<unk>",
112
+ pad_token="<pad>",
113
+ mask_token="<mask>",
114
+ **kwargs,
115
+ ):
116
+ self.vocab_file = vocab_file
117
+ self.merges_file = merges_file
118
+
119
+ self.encoder = {}
120
+ self.encoder[str(bos_token)] = 0
121
+ self.encoder[str(pad_token)] = 1
122
+ self.encoder[str(eos_token)] = 2
123
+ self.encoder[str(unk_token)] = 3
124
+
125
+ self.add_from_file(vocab_file)
126
+
127
+ self.decoder = {v: k for k, v in self.encoder.items()}
128
+
129
+ with open(merges_file, encoding="utf-8") as merges_handle:
130
+ merges = merges_handle.read().split("\n")[:-1]
131
+ merges = [tuple(merge.split()[:-1]) for merge in merges]
132
+
133
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
134
+ self.cache = {}
135
+
136
+ super().__init__(
137
+ bos_token=bos_token,
138
+ eos_token=eos_token,
139
+ unk_token=unk_token,
140
+ sep_token=sep_token,
141
+ cls_token=cls_token,
142
+ pad_token=pad_token,
143
+ mask_token=mask_token,
144
+ **kwargs,
145
+ )
146
+
147
+ def build_inputs_with_special_tokens(
148
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
149
+ ) -> List[int]:
150
+ """
151
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
152
+ adding special tokens. A PhoBERT sequence has the following format:
153
+
154
+ - single sequence: `<s> X </s>`
155
+ - pair of sequences: `<s> A </s></s> B </s>`
156
+
157
+ Args:
158
+ token_ids_0 (`List[int]`):
159
+ List of IDs to which the special tokens will be added.
160
+ token_ids_1 (`List[int]`, *optional*):
161
+ Optional second list of IDs for sequence pairs.
162
+
163
+ Returns:
164
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
165
+ """
166
+
167
+ if token_ids_1 is None:
168
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
169
+ cls = [self.cls_token_id]
170
+ sep = [self.sep_token_id]
171
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
172
+
173
+ def get_special_tokens_mask(
174
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
175
+ ) -> List[int]:
176
+ """
177
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
178
+ special tokens using the tokenizer `prepare_for_model` method.
179
+
180
+ Args:
181
+ token_ids_0 (`List[int]`):
182
+ List of IDs.
183
+ token_ids_1 (`List[int]`, *optional*):
184
+ Optional second list of IDs for sequence pairs.
185
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
186
+ Whether or not the token list is already formatted with special tokens for the model.
187
+
188
+ Returns:
189
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
190
+ """
191
+
192
+ if already_has_special_tokens:
193
+ return super().get_special_tokens_mask(
194
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
195
+ )
196
+
197
+ if token_ids_1 is None:
198
+ return [1] + ([0] * len(token_ids_0)) + [1]
199
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
200
+
201
+ def create_token_type_ids_from_sequences(
202
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
203
+ ) -> List[int]:
204
+ """
205
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not
206
+ make use of token type ids, therefore a list of zeros is returned.
207
+
208
+ Args:
209
+ token_ids_0 (`List[int]`):
210
+ List of IDs.
211
+ token_ids_1 (`List[int]`, *optional*):
212
+ Optional second list of IDs for sequence pairs.
213
+
214
+ Returns:
215
+ `List[int]`: List of zeros.
216
+ """
217
+
218
+ sep = [self.sep_token_id]
219
+ cls = [self.cls_token_id]
220
+
221
+ if token_ids_1 is None:
222
+ return len(cls + token_ids_0 + sep) * [0]
223
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
224
+
225
+ @property
226
+ def vocab_size(self):
227
+ return len(self.encoder)
228
+
229
+ def get_vocab(self):
230
+ return dict(self.encoder, **self.added_tokens_encoder)
231
+
232
+ def bpe(self, token):
233
+ if token in self.cache:
234
+ return self.cache[token]
235
+ word = tuple(token)
236
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
237
+ pairs = get_pairs(word)
238
+
239
+ if not pairs:
240
+ return token
241
+
242
+ while True:
243
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
244
+ if bigram not in self.bpe_ranks:
245
+ break
246
+ first, second = bigram
247
+ new_word = []
248
+ i = 0
249
+ while i < len(word):
250
+ try:
251
+ j = word.index(first, i)
252
+ except ValueError:
253
+ new_word.extend(word[i:])
254
+ break
255
+ else:
256
+ new_word.extend(word[i:j])
257
+ i = j
258
+
259
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
260
+ new_word.append(first + second)
261
+ i += 2
262
+ else:
263
+ new_word.append(word[i])
264
+ i += 1
265
+ new_word = tuple(new_word)
266
+ word = new_word
267
+ if len(word) == 1:
268
+ break
269
+ else:
270
+ pairs = get_pairs(word)
271
+ word = "@@ ".join(word)
272
+ word = word[:-4]
273
+ self.cache[token] = word
274
+ return word
275
+
276
+ def _tokenize(self, text):
277
+ """Tokenize a string."""
278
+ split_tokens = []
279
+
280
+ words = re.findall(r"\S+\n?", text)
281
+
282
+ for token in words:
283
+ split_tokens.extend(list(self.bpe(token).split(" ")))
284
+ return split_tokens
285
+
286
+ def _convert_token_to_id(self, token):
287
+ """Converts a token (str) in an id using the vocab."""
288
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
289
+
290
+ def _convert_id_to_token(self, index):
291
+ """Converts an index (integer) in a token (str) using the vocab."""
292
+ return self.decoder.get(index, self.unk_token)
293
+
294
+ def convert_tokens_to_string(self, tokens):
295
+ """Converts a sequence of tokens (string) in a single string."""
296
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
297
+ return out_string
298
+
299
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
300
+ if not os.path.isdir(save_directory):
301
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
302
+ return
303
+ out_vocab_file = os.path.join(
304
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
305
+ )
306
+ out_merge_file = os.path.join(
307
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
308
+ )
309
+
310
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
311
+ copyfile(self.vocab_file, out_vocab_file)
312
+ elif not os.path.isfile(self.vocab_file):
313
+ with open(out_vocab_file, "wb") as fi:
314
+ content_spiece_model = self.sp_model.serialized_model_proto()
315
+ fi.write(content_spiece_model)
316
+
317
+ if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
318
+ copyfile(self.merges_file, out_merge_file)
319
+
320
+ return out_vocab_file, out_merge_file
321
+
322
+ # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
323
+ # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
324
+ # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
325
+ # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
326
+ # return ''.join(tokens_generated_so_far)
327
+
328
+ def add_from_file(self, f):
329
+ """
330
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
331
+ """
332
+ if isinstance(f, str):
333
+ try:
334
+ with open(f, "r", encoding="utf-8") as fd:
335
+ self.add_from_file(fd)
336
+ except FileNotFoundError as fnfe:
337
+ raise fnfe
338
+ except UnicodeError:
339
+ raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
340
+ return
341
+
342
+ lines = f.readlines()
343
+ for lineTmp in lines:
344
+ line = lineTmp.strip()
345
+ idx = line.rfind(" ")
346
+ if idx == -1:
347
+ raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
348
+ word = line[:idx]
349
+ self.encoder[word] = len(self.encoder)
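A short usage sketch of the special-token layout documented in `build_inputs_with_special_tokens` above (`<s> X </s>` for single sequences, `<s> A </s></s> B </s>` for pairs). It assumes the public `vinai/phobert-base` checkpoint; any compatible `vocab.txt`/`bpe.codes` pair would behave the same way, and the Vietnamese sentences are arbitrary word-segmented examples.

```python
from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")

# Pair of sequences: <s> A </s></s> B </s>, as documented above.
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Tôi là sinh_viên"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hà_Nội là thủ_đô"))
pair_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)

print(tokenizer.convert_ids_to_tokens(pair_ids))      # tokens framed by <s>/</s>
print(tokenizer.get_special_tokens_mask(ids_a, ids_b))  # 1 marks special tokens
```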
llmeval-env/lib/python3.10/site-packages/transformers/models/roformer/__init__.py ADDED
@@ -0,0 +1,170 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
28
+ "tokenization_roformer": ["RoFormerTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_roformer"] = [
46
+ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "RoFormerForCausalLM",
48
+ "RoFormerForMaskedLM",
49
+ "RoFormerForMultipleChoice",
50
+ "RoFormerForQuestionAnswering",
51
+ "RoFormerForSequenceClassification",
52
+ "RoFormerForTokenClassification",
53
+ "RoFormerLayer",
54
+ "RoFormerModel",
55
+ "RoFormerPreTrainedModel",
56
+ "load_tf_weights_in_roformer",
57
+ ]
58
+
59
+
60
+ try:
61
+ if not is_tf_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_tf_roformer"] = [
67
+ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
68
+ "TFRoFormerForCausalLM",
69
+ "TFRoFormerForMaskedLM",
70
+ "TFRoFormerForMultipleChoice",
71
+ "TFRoFormerForQuestionAnswering",
72
+ "TFRoFormerForSequenceClassification",
73
+ "TFRoFormerForTokenClassification",
74
+ "TFRoFormerLayer",
75
+ "TFRoFormerModel",
76
+ "TFRoFormerPreTrainedModel",
77
+ ]
78
+
79
+
80
+ try:
81
+ if not is_flax_available():
82
+ raise OptionalDependencyNotAvailable()
83
+ except OptionalDependencyNotAvailable:
84
+ pass
85
+ else:
86
+ _import_structure["modeling_flax_roformer"] = [
87
+ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
88
+ "FlaxRoFormerForMaskedLM",
89
+ "FlaxRoFormerForMultipleChoice",
90
+ "FlaxRoFormerForQuestionAnswering",
91
+ "FlaxRoFormerForSequenceClassification",
92
+ "FlaxRoFormerForTokenClassification",
93
+ "FlaxRoFormerModel",
94
+ "FlaxRoFormerPreTrainedModel",
95
+ ]
96
+
97
+
98
+ if TYPE_CHECKING:
99
+ from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
100
+ from .tokenization_roformer import RoFormerTokenizer
101
+
102
+ try:
103
+ if not is_tokenizers_available():
104
+ raise OptionalDependencyNotAvailable()
105
+ except OptionalDependencyNotAvailable:
106
+ pass
107
+ else:
108
+ from .tokenization_roformer_fast import RoFormerTokenizerFast
109
+
110
+ try:
111
+ if not is_torch_available():
112
+ raise OptionalDependencyNotAvailable()
113
+ except OptionalDependencyNotAvailable:
114
+ pass
115
+ else:
116
+ from .modeling_roformer import (
117
+ ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
118
+ RoFormerForCausalLM,
119
+ RoFormerForMaskedLM,
120
+ RoFormerForMultipleChoice,
121
+ RoFormerForQuestionAnswering,
122
+ RoFormerForSequenceClassification,
123
+ RoFormerForTokenClassification,
124
+ RoFormerLayer,
125
+ RoFormerModel,
126
+ RoFormerPreTrainedModel,
127
+ load_tf_weights_in_roformer,
128
+ )
129
+
130
+ try:
131
+ if not is_tf_available():
132
+ raise OptionalDependencyNotAvailable()
133
+ except OptionalDependencyNotAvailable:
134
+ pass
135
+ else:
136
+ from .modeling_tf_roformer import (
137
+ TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
138
+ TFRoFormerForCausalLM,
139
+ TFRoFormerForMaskedLM,
140
+ TFRoFormerForMultipleChoice,
141
+ TFRoFormerForQuestionAnswering,
142
+ TFRoFormerForSequenceClassification,
143
+ TFRoFormerForTokenClassification,
144
+ TFRoFormerLayer,
145
+ TFRoFormerModel,
146
+ TFRoFormerPreTrainedModel,
147
+ )
148
+
149
+ try:
150
+ if not is_flax_available():
151
+ raise OptionalDependencyNotAvailable()
152
+ except OptionalDependencyNotAvailable:
153
+ pass
154
+ else:
155
+ from .modeling_flax_roformer import (
156
+ FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
157
+ FlaxRoFormerForMaskedLM,
158
+ FlaxRoFormerForMultipleChoice,
159
+ FlaxRoFormerForQuestionAnswering,
160
+ FlaxRoFormerForSequenceClassification,
161
+ FlaxRoFormerForTokenClassification,
162
+ FlaxRoFormerModel,
163
+ FlaxRoFormerPreTrainedModel,
164
+ )
165
+
166
+
167
+ else:
168
+ import sys
169
+
170
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
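A brief sketch of how the lazy import structure above behaves from user code: `import transformers` does not eagerly load the RoFormer modeling files, submodules are resolved on first attribute access, and backend-specific symbols are only exposed when the corresponding framework is installed (mirroring the try/except blocks in this `__init__`).

```python
from transformers import RoFormerConfig  # configuration is always available
from transformers.utils import is_torch_available

# Hypothetical small configuration, just to show the lazy access pattern.
config = RoFormerConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)

if is_torch_available():
    # Resolved lazily through the _import_structure registered above.
    from transformers import RoFormerModel

    model = RoFormerModel(config)
```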
llmeval-env/lib/python3.10/site-packages/transformers/models/roformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/roformer/__pycache__/configuration_roformer.cpython-310.pyc ADDED
Binary file (6.19 kB). View file