applied-ai-018 committed
Commit d315ee6 · verified · 1 Parent(s): 7ac701f

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__init__.py +63 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/configuration_biogpt.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/convert_biogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/modeling_biogpt.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/tokenization_biogpt.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/configuration_biogpt.py +134 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py +292 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/modeling_biogpt.py +924 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/tokenization_biogpt.py +357 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__init__.py +135 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/__init__.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_audio.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_text.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_vision.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_audio.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_text.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_vision.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_tf_data2vec_vision.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_audio.py +285 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_text.py +153 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_vision.py +193 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py +286 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py +208 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py +374 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_audio.py +1514 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_text.py +1557 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_vision.py +1228 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_tf_data2vec_vision.py +1717 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__init__.py +120 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py +193 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py +1426 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py +1644 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py +393 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py +247 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__init__.py +75 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/__init__.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/configuration_mask2former.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/convert_mask2former_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/modeling_mask2former.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__init__.py ADDED
@@ -0,0 +1,63 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
21
+ "tokenization_biogpt": ["BioGptTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_biogpt"] = [
31
+ "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "BioGptForCausalLM",
33
+ "BioGptForTokenClassification",
34
+ "BioGptForSequenceClassification",
35
+ "BioGptModel",
36
+ "BioGptPreTrainedModel",
37
+ ]
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
42
+ from .tokenization_biogpt import BioGptTokenizer
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ from .modeling_biogpt import (
51
+ BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
52
+ BioGptForCausalLM,
53
+ BioGptForSequenceClassification,
54
+ BioGptForTokenClassification,
55
+ BioGptModel,
56
+ BioGptPreTrainedModel,
57
+ )
58
+
59
+
60
+ else:
61
+ import sys
62
+
63
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
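For orientation, a minimal sketch of how the lazy import structure registered above is typically consumed once torch is installed (illustrative only, not part of the diff):

```python
# Illustrative sketch: the _LazyModule set up in biogpt/__init__.py lets these
# names resolve on first access from the top-level transformers package
# (assumes a transformers install with torch available).
from transformers import BioGptConfig, BioGptForCausalLM

config = BioGptConfig()            # defaults mirror microsoft/biogpt
model = BioGptForCausalLM(config)  # randomly initialized weights, no download
```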
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/configuration_biogpt.cpython-310.pyc ADDED
Binary file (5.51 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/convert_biogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (8.05 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/modeling_biogpt.cpython-310.pyc ADDED
Binary file (26.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/tokenization_biogpt.cpython-310.pyc ADDED
Binary file (12.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/configuration_biogpt.py ADDED
@@ -0,0 +1,134 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BioGPT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class BioGptConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`BioGptModel`]. It is used to instantiate a
30
+ BioGPT model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the BioGPT
32
+ [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 42384):
40
+ Vocabulary size of the BioGPT model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`BioGptModel`].
42
+ hidden_size (`int`, *optional*, defaults to 1024):
43
+ Dimension of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 24):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 16):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 4096):
49
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
53
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
54
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
55
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout ratio for the attention probabilities.
57
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ initializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
62
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
63
+ The epsilon used by the layer normalization layers.
64
+ scale_embedding (`bool`, *optional*, defaults to `True`):
65
+ Scale embeddings by dividing by sqrt(d_model).
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ layerdrop (`float`, *optional*, defaults to 0.0):
70
+ Please refer to the paper about LayerDrop: https://arxiv.org/abs/1909.11556 for further details
71
+ activation_dropout (`float`, *optional*, defaults to 0.0):
72
+ The dropout ratio for activations inside the fully connected layer.
73
+ pad_token_id (`int`, *optional*, defaults to 1):
74
+ Padding token id.
75
+ bos_token_id (`int`, *optional*, defaults to 0):
76
+ Beginning of stream token id.
77
+ eos_token_id (`int`, *optional*, defaults to 2):
78
+ End of stream token id.
79
+
80
+ Example:
81
+
82
+ ```python
83
+ >>> from transformers import BioGptModel, BioGptConfig
84
+
85
+ >>> # Initializing a BioGPT microsoft/biogpt style configuration
86
+ >>> configuration = BioGptConfig()
87
+
88
+ >>> # Initializing a model from the microsoft/biogpt style configuration
89
+ >>> model = BioGptModel(configuration)
90
+
91
+ >>> # Accessing the model configuration
92
+ >>> configuration = model.config
93
+ ```"""
94
+
95
+ model_type = "biogpt"
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_size=42384,
100
+ hidden_size=1024,
101
+ num_hidden_layers=24,
102
+ num_attention_heads=16,
103
+ intermediate_size=4096,
104
+ hidden_act="gelu",
105
+ hidden_dropout_prob=0.1,
106
+ attention_probs_dropout_prob=0.1,
107
+ max_position_embeddings=1024,
108
+ initializer_range=0.02,
109
+ layer_norm_eps=1e-12,
110
+ scale_embedding=True,
111
+ use_cache=True,
112
+ layerdrop=0.0,
113
+ activation_dropout=0.0,
114
+ pad_token_id=1,
115
+ bos_token_id=0,
116
+ eos_token_id=2,
117
+ **kwargs,
118
+ ):
119
+ self.vocab_size = vocab_size
120
+ self.max_position_embeddings = max_position_embeddings
121
+ self.hidden_size = hidden_size
122
+ self.num_hidden_layers = num_hidden_layers
123
+ self.num_attention_heads = num_attention_heads
124
+ self.intermediate_size = intermediate_size
125
+ self.hidden_act = hidden_act
126
+ self.hidden_dropout_prob = hidden_dropout_prob
127
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
128
+ self.initializer_range = initializer_range
129
+ self.layer_norm_eps = layer_norm_eps
130
+ self.scale_embedding = scale_embedding
131
+ self.use_cache = use_cache
132
+ self.layerdrop = layerdrop
133
+ self.activation_dropout = activation_dropout
134
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,292 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import argparse
18
+ import json
19
+ import os
20
+ import re
21
+ import shutil
22
+
23
+ import torch
24
+
25
+ from transformers import BioGptConfig, BioGptForCausalLM
26
+ from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
27
+ from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
28
+ from transformers.utils import WEIGHTS_NAME, logging
29
+
30
+
31
+ logging.set_verbosity_warning()
32
+
33
+ json_indent = 2
34
+
35
+
36
+ # modified from https://github.com/facebookresearch/fairseq/blob/dd74992d0d143155998e9ed4076826bcea80fb06/fairseq/data/dictionary.py#L18
37
+ class Dictionary:
38
+ """A mapping from symbols to consecutive integers"""
39
+
40
+ def __init__(
41
+ self,
42
+ *, # begin keyword-only arguments
43
+ bos="<s>",
44
+ pad="<pad>",
45
+ eos="</s>",
46
+ unk="<unk>",
47
+ extra_special_symbols=None,
48
+ ):
49
+ self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
50
+ self.symbols = []
51
+ self.count = []
52
+ self.indices = {}
53
+ self.bos_index = self.add_symbol(bos)
54
+ self.pad_index = self.add_symbol(pad)
55
+ self.eos_index = self.add_symbol(eos)
56
+ self.unk_index = self.add_symbol(unk)
57
+ if extra_special_symbols:
58
+ for s in extra_special_symbols:
59
+ self.add_symbol(s)
60
+ self.nspecial = len(self.symbols)
61
+
62
+ def __eq__(self, other):
63
+ return self.indices == other.indices
64
+
65
+ def __getitem__(self, idx):
66
+ if idx < len(self.symbols):
67
+ return self.symbols[idx]
68
+ return self.unk_word
69
+
70
+ def __len__(self):
71
+ """Returns the number of symbols in the dictionary"""
72
+ return len(self.symbols)
73
+
74
+ def __contains__(self, sym):
75
+ return sym in self.indices
76
+
77
+ @classmethod
78
+ def load(cls, f):
79
+ """Loads the dictionary from a text file with the format:
80
+
81
+ ```
82
+ <symbol0> <count0>
83
+ <symbol1> <count1>
84
+ ...
85
+ ```
86
+ """
87
+ d = cls()
88
+ d.add_from_file(f)
89
+ return d
90
+
91
+ def add_symbol(self, word, n=1, overwrite=False):
92
+ """Adds a word to the dictionary"""
93
+ if word in self.indices and not overwrite:
94
+ idx = self.indices[word]
95
+ self.count[idx] = self.count[idx] + n
96
+ return idx
97
+ else:
98
+ idx = len(self.symbols)
99
+ self.indices[word] = idx
100
+ self.symbols.append(word)
101
+ self.count.append(n)
102
+ return idx
103
+
104
+ def _load_meta(self, lines):
105
+ return 0
106
+
107
+ def add_from_file(self, f):
108
+ """
109
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
110
+ """
111
+ if isinstance(f, str):
112
+ try:
113
+ with open(f, "r", encoding="utf-8") as fd:
114
+ self.add_from_file(fd)
115
+ except FileNotFoundError as fnfe:
116
+ raise fnfe
117
+ except UnicodeError:
118
+ raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
119
+ return
120
+
121
+ lines = f.readlines()
122
+ indices_start_line = self._load_meta(lines)
123
+
124
+ for line in lines[indices_start_line:]:
125
+ try:
126
+ line, field = line.rstrip().rsplit(" ", 1)
127
+ if field == "#fairseq:overwrite":
128
+ overwrite = True
129
+ line, field = line.rsplit(" ", 1)
130
+ else:
131
+ overwrite = False
132
+ count = int(field)
133
+ word = line
134
+ if word in self and not overwrite:
135
+ raise RuntimeError(
136
+ "Duplicate word found when loading Dictionary: '{}'. "
137
+ "Duplicate words can overwrite earlier ones by adding the "
138
+ "#fairseq:overwrite flag at the end of the corresponding row "
139
+ "in the dictionary file. If using the Camembert model, please "
140
+ "download an updated copy of the model file.".format(word)
141
+ )
142
+ self.add_symbol(word, n=count, overwrite=overwrite)
143
+ except ValueError:
144
+ raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
145
+
146
+
147
+ def rewrite_dict_keys(d):
148
+ # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
149
+ # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
150
+ d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
151
+ keep_keys = "<s> <pad> </s> <unk>".split()
152
+ # restore the special tokens
153
+ for k in keep_keys:
154
+ del d2[f"{k}</w>"]
155
+ d2[k] = d[k] # restore
156
+ return d2
157
+
158
+
159
+ def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
160
+ # prep
161
+ if not os.path.exists(biogpt_checkpoint_path):
162
+ raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
163
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
164
+ print(f"Writing results to {pytorch_dump_folder_path}")
165
+
166
+ # handle various types of models
167
+
168
+ checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
169
+ if not os.path.isfile(checkpoint_file):
170
+ raise ValueError(f"path to the file {checkpoint_file} does not exist!")
171
+ chkpt = torch.load(checkpoint_file, map_location="cpu")
172
+
173
+ args = chkpt["cfg"]["model"]
174
+
175
+ # dicts
176
+ dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
177
+ if not os.path.isfile(dict_file):
178
+ raise ValueError(f"path to the file {dict_file} does not exist!")
179
+ src_dict = Dictionary.load(dict_file)
180
+ src_vocab = rewrite_dict_keys(src_dict.indices)
181
+ src_vocab_size = len(src_vocab)
182
+ src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
183
+ print(f"Generating {src_vocab_file} of {src_vocab_size} records")
184
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
185
+ f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
186
+
187
+ # merges_file (bpecodes)
188
+ bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
189
+ if not os.path.isfile(bpecodes_file):
190
+ raise ValueError(f"path to the file {bpecodes_file} does not exist!")
191
+
192
+ merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
193
+ shutil.copyfile(bpecodes_file, merges_file)
194
+
195
+ # model config
196
+ biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
197
+
198
+ model_conf = {
199
+ "activation_dropout": args["activation_dropout"],
200
+ "architectures": ["BioGptForCausalLM"],
201
+ "attention_probs_dropout_prob": args["attention_dropout"],
202
+ "bos_token_id": 0,
203
+ "eos_token_id": 2,
204
+ "hidden_act": args["activation_fn"],
205
+ "hidden_dropout_prob": args["dropout"],
206
+ "hidden_size": args["decoder_embed_dim"],
207
+ "initializer_range": 0.02,
208
+ "intermediate_size": args["decoder_ffn_embed_dim"],
209
+ "layer_norm_eps": 1e-12,
210
+ "layerdrop": args["decoder_layerdrop"],
211
+ "max_position_embeddings": args["max_target_positions"],
212
+ "model_type": "biogpt",
213
+ "num_attention_heads": args["decoder_attention_heads"],
214
+ "num_hidden_layers": args["decoder_layers"],
215
+ "pad_token_id": 1,
216
+ "scale_embedding": not args["no_scale_embedding"],
217
+ "tie_word_embeddings": args["share_decoder_input_output_embed"],
218
+ "vocab_size": src_vocab_size,
219
+ }
220
+
221
+ # good hparam defaults to start with
222
+
223
+ print(f"Generating {biogpt_model_config_file}")
224
+ with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
225
+ f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
226
+
227
+ # tokenizer config
228
+ biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
229
+
230
+ tokenizer_conf = {
231
+ "bos_token": "<s>",
232
+ "eos_token": "</s>",
233
+ "model_max_length": 1024,
234
+ "pad_token": "<pad>",
235
+ "special_tokens_map_file": None,
236
+ "tokenizer_class": "BioGptTokenizer",
237
+ "unk_token": "<unk>",
238
+ }
239
+
240
+ print(f"Generating {biogpt_tokenizer_config_file}")
241
+ with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
242
+ f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
243
+
244
+ # model
245
+ model_state_dict = chkpt["model"]
246
+
247
+ # remove unneeded keys
248
+ ignore_keys = [
249
+ "decoder.version",
250
+ ]
251
+ for k in ignore_keys:
252
+ model_state_dict.pop(k, None)
253
+
254
+ layer_names = list(model_state_dict.keys())
255
+ for layer_name in layer_names:
256
+ if layer_name.endswith("output_projection.weight"):
257
+ model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
258
+ else:
259
+ model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)
260
+
261
+ config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
262
+ model_new = BioGptForCausalLM(config)
263
+
264
+ # check that it loads ok
265
+ model_new.load_state_dict(model_state_dict)
266
+
267
+ # save
268
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
269
+ print(f"Generating {pytorch_weights_dump_path}")
270
+ torch.save(model_state_dict, pytorch_weights_dump_path)
271
+
272
+ print("Conversion is done!")
273
+
274
+
275
+ if __name__ == "__main__":
276
+ parser = argparse.ArgumentParser()
277
+ # Required parameters
278
+ parser.add_argument(
279
+ "--biogpt_checkpoint_path",
280
+ default=None,
281
+ type=str,
282
+ required=True,
283
+ help=(
284
+ "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
285
+ " bpecodes, etc."
286
+ ),
287
+ )
288
+ parser.add_argument(
289
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
290
+ )
291
+ args = parser.parse_args()
292
+ convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
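The conversion entry point defined above can also be invoked directly from Python rather than through argparse; a hedged sketch with placeholder paths (the checkpoint directory is expected to contain checkpoint.pt, dict.txt and bpecodes, as checked inside the function):

```python
# Placeholder paths, illustrative only. Mirrors the argparse entry point above.
from transformers.models.biogpt.convert_biogpt_original_pytorch_checkpoint_to_pytorch import (
    convert_biogpt_checkpoint_to_pytorch,
)

convert_biogpt_checkpoint_to_pytorch(
    "path/to/fairseq_biogpt_dir",   # must contain checkpoint.pt, dict.txt, bpecodes
    "path/to/hf_biogpt_dump",       # config.json, tokenizer files and weights are written here
)
```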
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/modeling_biogpt.py ADDED
@@ -0,0 +1,924 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BioGPT model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ CausalLMOutputWithCrossAttentions,
31
+ SequenceClassifierOutputWithPast,
32
+ TokenClassifierOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ )
41
+ from .configuration_biogpt import BioGptConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CHECKPOINT_FOR_DOC = "microsoft/biogpt"
47
+ _CONFIG_FOR_DOC = "BioGptConfig"
48
+
49
+
50
+ from ..deprecated._archive_maps import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ # Copied from transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding with OPT->BioGpt
54
+ class BioGptLearnedPositionalEmbedding(nn.Embedding):
55
+ """
56
+ This module learns positional embeddings up to a fixed maximum size.
57
+ """
58
+
59
+ def __init__(self, num_embeddings: int, embedding_dim: int):
60
+ # BioGpt is set up so that if padding_idx is specified then offset the embedding ids by 2
61
+ # and adjust num_embeddings appropriately. Other models don't have this hack
62
+ self.offset = 2
63
+ super().__init__(num_embeddings + self.offset, embedding_dim)
64
+
65
+ def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
66
+ """`attention_mask` is expected to be of shape [bsz x seqlen]."""
67
+ attention_mask = attention_mask.long()
68
+
69
+ # create positions depending on attention_mask
70
+ positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
71
+
72
+ # cut positions if `past_key_values_length` is > 0
73
+ positions = positions[:, past_key_values_length:]
74
+
75
+ return super().forward(positions + self.offset)
76
+
77
+
78
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BioGpt
79
+ class BioGptAttention(nn.Module):
80
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
81
+
82
+ def __init__(
83
+ self,
84
+ embed_dim: int,
85
+ num_heads: int,
86
+ dropout: float = 0.0,
87
+ is_decoder: bool = False,
88
+ bias: bool = True,
89
+ is_causal: bool = False,
90
+ config: Optional[BioGptConfig] = None,
91
+ ):
92
+ super().__init__()
93
+ self.embed_dim = embed_dim
94
+ self.num_heads = num_heads
95
+ self.dropout = dropout
96
+ self.head_dim = embed_dim // num_heads
97
+ self.config = config
98
+
99
+ if (self.head_dim * num_heads) != self.embed_dim:
100
+ raise ValueError(
101
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
102
+ f" and `num_heads`: {num_heads})."
103
+ )
104
+ self.scaling = self.head_dim**-0.5
105
+ self.is_decoder = is_decoder
106
+ self.is_causal = is_causal
107
+
108
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
109
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
110
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
111
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
112
+
113
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
114
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
115
+
116
+ def forward(
117
+ self,
118
+ hidden_states: torch.Tensor,
119
+ key_value_states: Optional[torch.Tensor] = None,
120
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
121
+ attention_mask: Optional[torch.Tensor] = None,
122
+ layer_head_mask: Optional[torch.Tensor] = None,
123
+ output_attentions: bool = False,
124
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
125
+ """Input shape: Batch x Time x Channel"""
126
+
127
+ # if key_value_states are provided this layer is used as a cross-attention layer
128
+ # for the decoder
129
+ is_cross_attention = key_value_states is not None
130
+
131
+ bsz, tgt_len, _ = hidden_states.size()
132
+
133
+ # get query proj
134
+ query_states = self.q_proj(hidden_states) * self.scaling
135
+ # get key, value proj
136
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
137
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
138
+ # the provided `key_value_states` to support prefix tuning
139
+ if (
140
+ is_cross_attention
141
+ and past_key_value is not None
142
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
143
+ ):
144
+ # reuse k,v, cross_attentions
145
+ key_states = past_key_value[0]
146
+ value_states = past_key_value[1]
147
+ elif is_cross_attention:
148
+ # cross_attentions
149
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
150
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
151
+ elif past_key_value is not None:
152
+ # reuse k, v, self_attention
153
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
154
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
155
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
156
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
157
+ else:
158
+ # self_attention
159
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
160
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
161
+
162
+ if self.is_decoder:
163
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
164
+ # Further calls to cross_attention layer can then reuse all cross-attention
165
+ # key/value_states (first "if" case)
166
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
167
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
168
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
169
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
170
+ past_key_value = (key_states, value_states)
171
+
172
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
173
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
174
+ key_states = key_states.reshape(*proj_shape)
175
+ value_states = value_states.reshape(*proj_shape)
176
+
177
+ src_len = key_states.size(1)
178
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
179
+
180
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
181
+ raise ValueError(
182
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
183
+ f" {attn_weights.size()}"
184
+ )
185
+
186
+ if attention_mask is not None:
187
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
188
+ raise ValueError(
189
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
190
+ )
191
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
192
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
193
+
194
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
195
+
196
+ if layer_head_mask is not None:
197
+ if layer_head_mask.size() != (self.num_heads,):
198
+ raise ValueError(
199
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
200
+ f" {layer_head_mask.size()}"
201
+ )
202
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
203
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
204
+
205
+ if output_attentions:
206
+ # this operation is a bit awkward, but it's required to
207
+ # make sure that attn_weights keeps its gradient.
208
+ # In order to do so, attn_weights have to be reshaped
209
+ # twice and have to be reused in the following
210
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
211
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
212
+ else:
213
+ attn_weights_reshaped = None
214
+
215
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
216
+
217
+ attn_output = torch.bmm(attn_probs, value_states)
218
+
219
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
220
+ raise ValueError(
221
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
222
+ f" {attn_output.size()}"
223
+ )
224
+
225
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
226
+ attn_output = attn_output.transpose(1, 2)
227
+
228
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
229
+ # partitioned across GPUs when using tensor-parallelism.
230
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
231
+
232
+ attn_output = self.out_proj(attn_output)
233
+
234
+ return attn_output, attn_weights_reshaped, past_key_value
235
+
236
+
237
+ class BioGptDecoderLayer(nn.Module):
238
+ def __init__(self, config: BioGptConfig):
239
+ super().__init__()
240
+ self.embed_dim = config.hidden_size
241
+
242
+ self.self_attn = BioGptAttention(
243
+ embed_dim=self.embed_dim,
244
+ num_heads=config.num_attention_heads,
245
+ dropout=config.attention_probs_dropout_prob,
246
+ is_decoder=True,
247
+ )
248
+ self.dropout = config.hidden_dropout_prob
249
+ self.activation_fn = ACT2FN[config.hidden_act]
250
+ self.activation_dropout = config.activation_dropout
251
+
252
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
253
+
254
+ self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
255
+ self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)
256
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
257
+
258
+ def forward(
259
+ self,
260
+ hidden_states: torch.Tensor,
261
+ attention_mask: Optional[torch.Tensor] = None,
262
+ layer_head_mask: Optional[torch.Tensor] = None,
263
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
264
+ output_attentions: Optional[bool] = False,
265
+ use_cache: Optional[bool] = True,
266
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
267
+ """
268
+ Args:
269
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
270
+ attention_mask (`torch.FloatTensor`): attention mask of size
271
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
272
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
273
+ `(encoder_attention_heads,)`.
274
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
275
+ output_attentions (`bool`, *optional*):
276
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
277
+ returned tensors for more detail.
278
+ use_cache (`bool`, *optional*):
279
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
280
+ (see `past_key_values`).
281
+ """
282
+ residual = hidden_states
283
+
284
+ hidden_states = self.self_attn_layer_norm(hidden_states)
285
+
286
+ # Self Attention
287
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
288
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
289
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
290
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
291
+ hidden_states=hidden_states,
292
+ past_key_value=self_attn_past_key_value,
293
+ attention_mask=attention_mask,
294
+ layer_head_mask=layer_head_mask,
295
+ output_attentions=output_attentions,
296
+ )
297
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
298
+ hidden_states = residual + hidden_states
299
+
300
+ # Fully Connected
301
+ residual = hidden_states
302
+ hidden_states = self.final_layer_norm(hidden_states)
303
+ hidden_states = self.fc1(hidden_states)
304
+ hidden_states = self.activation_fn(hidden_states)
305
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
306
+ hidden_states = self.fc2(hidden_states)
307
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
308
+ hidden_states = residual + hidden_states
309
+
310
+ outputs = (hidden_states,)
311
+
312
+ if output_attentions:
313
+ outputs += (self_attn_weights,)
314
+
315
+ if use_cache:
316
+ outputs += (present_key_value,)
317
+
318
+ return outputs
319
+
320
+
321
+ class BioGptPreTrainedModel(PreTrainedModel):
322
+ """
323
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
324
+ models.
325
+ """
326
+
327
+ config_class = BioGptConfig
328
+ base_model_prefix = "biogpt"
329
+ supports_gradient_checkpointing = True
330
+
331
+ def _init_weights(self, module):
332
+ """Initialize the weights"""
333
+ if isinstance(module, nn.Linear):
334
+ # Slightly different from the TF version which uses truncated_normal for initialization
335
+ # cf https://github.com/pytorch/pytorch/pull/5617
336
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
337
+ if module.bias is not None:
338
+ module.bias.data.zero_()
339
+ elif isinstance(module, nn.Embedding):
340
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
341
+ if module.padding_idx is not None:
342
+ module.weight.data[module.padding_idx].zero_()
343
+ elif isinstance(module, nn.LayerNorm):
344
+ module.bias.data.zero_()
345
+ module.weight.data.fill_(1.0)
346
+
347
+
348
+ BIOGPT_START_DOCSTRING = r"""
349
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
350
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
351
+ behavior.
352
+
353
+ Parameters:
354
+ config ([`~BioGptConfig`]): Model configuration class with all the parameters of the model.
355
+ Initializing with a config file does not load the weights associated with the model, only the
356
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
357
+ """
358
+
359
+ BIOGPT_INPUTS_DOCSTRING = r"""
360
+ Args:
361
+ input_ids (`torch.LongTensor` of shape `({0})`):
362
+ Indices of input sequence tokens in the vocabulary.
363
+
364
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
365
+ [`PreTrainedTokenizer.__call__`] for details.
366
+
367
+ [What are input IDs?](../glossary#input-ids)
368
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
369
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
370
+
371
+ - 1 for tokens that are **not masked**,
372
+ - 0 for tokens that are **masked**.
373
+
374
+ [What are attention masks?](../glossary#attention-mask)
375
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
376
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
377
+
378
+ - 1 indicates the head is **not masked**,
379
+ - 0 indicates the head is **masked**.
380
+
381
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
382
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
383
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
384
+ model's internal embedding lookup matrix.
385
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
386
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
387
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
388
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
389
+
390
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
391
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
392
+
393
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
394
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
395
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
396
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
397
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
398
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
399
+ than the model's internal embedding lookup matrix.
400
+ use_cache (`bool`, *optional*):
401
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
402
+ `past_key_values`).
403
+ output_attentions (`bool`, *optional*):
404
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
405
+ tensors for more detail.
406
+ output_hidden_states (`bool`, *optional*):
407
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
408
+ more detail.
409
+ return_dict (`bool`, *optional*):
410
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
411
+ """
412
+
413
+
414
+ @add_start_docstrings(
415
+ "The bare BioGPT Model transformer outputting raw hidden-states without any specific head on top.",
416
+ BIOGPT_START_DOCSTRING,
417
+ )
418
+ class BioGptModel(BioGptPreTrainedModel):
419
+ def __init__(self, config: BioGptConfig):
420
+ super().__init__(config)
421
+ self.config = config
422
+ self.layerdrop = config.layerdrop
423
+ self.dropout = config.hidden_dropout_prob
424
+ self.embed_dim = config.hidden_size
425
+ self.padding_idx = config.pad_token_id
426
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
427
+
428
+ self.embed_tokens = nn.Embedding(config.vocab_size, self.embed_dim, self.padding_idx)
429
+ self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim)
430
+
431
+ self.layers = nn.ModuleList([BioGptDecoderLayer(config) for _ in range(config.num_hidden_layers)])
432
+ self.layer_norm = nn.LayerNorm(self.embed_dim)
433
+
434
+ self.gradient_checkpointing = False
435
+ # Initialize weights and apply final processing
436
+ self.post_init()
437
+
438
+ def get_input_embeddings(self):
439
+ return self.embed_tokens
440
+
441
+ def set_input_embeddings(self, value):
442
+ self.embed_tokens = value
443
+
444
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
445
+ @add_code_sample_docstrings(
446
+ checkpoint=_CHECKPOINT_FOR_DOC,
447
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
448
+ config_class=_CONFIG_FOR_DOC,
449
+ )
450
+ def forward(
451
+ self,
452
+ input_ids: Optional[torch.LongTensor] = None,
453
+ attention_mask: Optional[torch.FloatTensor] = None,
454
+ head_mask: Optional[torch.FloatTensor] = None,
455
+ inputs_embeds: Optional[torch.FloatTensor] = None,
456
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
457
+ use_cache: Optional[bool] = None,
458
+ output_attentions: Optional[bool] = None,
459
+ output_hidden_states: Optional[bool] = None,
460
+ return_dict: Optional[bool] = None,
461
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
462
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
463
+ output_hidden_states = (
464
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
465
+ )
466
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
467
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
468
+
469
+ # retrieve input_ids and inputs_embeds
470
+ if input_ids is not None and inputs_embeds is not None:
471
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
472
+ elif input_ids is not None:
473
+ input = input_ids
474
+ input_shape = input.size()
475
+ elif inputs_embeds is not None:
476
+ input_shape = inputs_embeds.size()[:-1]
477
+ input = inputs_embeds[:, :, -1]
478
+ else:
479
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
480
+
481
+ # past_key_values_length
482
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
483
+
484
+ if inputs_embeds is None:
485
+ inputs_embeds = self.embed_tokens(input) * self.embed_scale
486
+
487
+ if attention_mask is None:
488
+ attention_mask = torch.ones(
489
+ (inputs_embeds.shape[0], inputs_embeds.shape[1] + past_key_values_length),
490
+ dtype=torch.bool,
491
+ device=inputs_embeds.device,
492
+ )
493
+ elif attention_mask.shape[1] != past_key_values_length + input_shape[1]:
494
+ raise ValueError(
495
+ f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be "
496
+ f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)"
497
+ )
498
+
499
+ # embed positions
500
+ positions = self.embed_positions(attention_mask, past_key_values_length)
501
+
502
+ attention_mask = _prepare_4d_causal_attention_mask(
503
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
504
+ )
505
+
506
+ hidden_states = inputs_embeds + positions
507
+
508
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
509
+
510
+ if self.gradient_checkpointing and self.training:
511
+ if use_cache:
512
+ logger.warning_once(
513
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
514
+ )
515
+ use_cache = False
516
+
517
+ all_hidden_states = () if output_hidden_states else None
518
+ all_self_attns = () if output_attentions else None
519
+ all_cross_attentions = None
520
+ next_decoder_cache = () if use_cache else None
521
+
522
+ for idx, decoder_layer in enumerate(self.layers):
523
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
524
+ if output_hidden_states:
525
+ all_hidden_states += (hidden_states,)
526
+ if self.training:
527
+ dropout_probability = torch.rand([])
528
+ if dropout_probability < self.layerdrop:
529
+ continue
530
+
531
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
532
+
533
+ if self.gradient_checkpointing and self.training:
534
+ layer_outputs = self._gradient_checkpointing_func(
535
+ decoder_layer.__call__,
536
+ hidden_states,
537
+ attention_mask,
538
+ head_mask[idx] if head_mask is not None else None,
539
+ None,
540
+ output_attentions,
541
+ use_cache,
542
+ )
543
+ else:
544
+ layer_outputs = decoder_layer(
545
+ hidden_states,
546
+ attention_mask=attention_mask,
547
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
548
+ past_key_value=past_key_value,
549
+ output_attentions=output_attentions,
550
+ use_cache=use_cache,
551
+ )
552
+
553
+ hidden_states = layer_outputs[0]
554
+
555
+ if use_cache:
556
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
557
+
558
+ if output_attentions:
559
+ all_self_attns += (layer_outputs[1],)
560
+
561
+ # add hidden states from the last decoder layer
562
+ if output_hidden_states:
563
+ all_hidden_states += (hidden_states,)
564
+
565
+ hidden_states = self.layer_norm(hidden_states)
566
+
567
+ next_cache = next_decoder_cache if use_cache else None
568
+
569
+ if not return_dict:
570
+ return tuple(
571
+ v
572
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
573
+ if v is not None
574
+ )
575
+ return BaseModelOutputWithPastAndCrossAttentions(
576
+ last_hidden_state=hidden_states,
577
+ past_key_values=next_cache,
578
+ hidden_states=all_hidden_states,
579
+ attentions=all_self_attns,
580
+ cross_attentions=all_cross_attentions,
581
+ )
582
+
583
+
584
+ @add_start_docstrings(
585
+ """BioGPT Model with a `language modeling` head on top for CLM fine-tuning.""", BIOGPT_START_DOCSTRING
586
+ )
587
+ class BioGptForCausalLM(BioGptPreTrainedModel):
588
+ _tied_weights_keys = ["output_projection.weight"]
589
+
590
+ def __init__(self, config):
591
+ super().__init__(config)
592
+
593
+ self.biogpt = BioGptModel(config)
594
+ self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
595
+
596
+ # Initialize weights and apply final processing
597
+ self.post_init()
598
+
599
+ def get_output_embeddings(self):
600
+ return self.output_projection
601
+
602
+ def set_output_embeddings(self, new_embeddings):
603
+ self.output_projection = new_embeddings
604
+
605
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
606
+ @add_code_sample_docstrings(
607
+ checkpoint=_CHECKPOINT_FOR_DOC,
608
+ output_type=CausalLMOutputWithCrossAttentions,
609
+ config_class=_CONFIG_FOR_DOC,
610
+ )
611
+ def forward(
612
+ self,
613
+ input_ids: Optional[torch.LongTensor] = None,
614
+ attention_mask: Optional[torch.FloatTensor] = None,
615
+ head_mask: Optional[torch.FloatTensor] = None,
616
+ inputs_embeds: Optional[torch.FloatTensor] = None,
617
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
618
+ labels: Optional[torch.LongTensor] = None,
619
+ use_cache: Optional[bool] = None,
620
+ output_attentions: Optional[bool] = None,
621
+ output_hidden_states: Optional[bool] = None,
622
+ return_dict: Optional[bool] = None,
623
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
624
+ r"""
625
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
626
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
627
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
628
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
629
+ """
630
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
631
+
632
+ outputs = self.biogpt(
633
+ input_ids,
634
+ attention_mask=attention_mask,
635
+ head_mask=head_mask,
636
+ inputs_embeds=inputs_embeds,
637
+ past_key_values=past_key_values,
638
+ use_cache=use_cache,
639
+ output_attentions=output_attentions,
640
+ output_hidden_states=output_hidden_states,
641
+ return_dict=return_dict,
642
+ )
643
+
644
+ sequence_output = outputs[0]
645
+ prediction_scores = self.output_projection(sequence_output)
646
+
647
+ lm_loss = None
648
+ if labels is not None:
649
+ # we are doing next-token prediction; shift prediction scores and input ids by one
650
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
651
+ labels = labels[:, 1:].contiguous()
652
+ loss_fct = CrossEntropyLoss()
653
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
654
+
655
+ if not return_dict:
656
+ output = (prediction_scores,) + outputs[1:]
657
+ return ((lm_loss,) + output) if lm_loss is not None else output
658
+
659
+ return CausalLMOutputWithCrossAttentions(
660
+ loss=lm_loss,
661
+ logits=prediction_scores,
662
+ past_key_values=outputs.past_key_values,
663
+ hidden_states=outputs.hidden_states,
664
+ attentions=outputs.attentions,
665
+ cross_attentions=outputs.cross_attentions,
666
+ )
667
+
668
+ def prepare_inputs_for_generation(
669
+ self, input_ids, attention_mask, inputs_embeds=None, past_key_values=None, **kwargs
670
+ ):
671
+ # only keep the last tokens of input_ids if past_key_values is provided
672
+ if past_key_values is not None:
673
+ past_length = past_key_values[0][0].shape[2]
674
+
675
+ # Some generation methods already pass only the last input ID
676
+ if input_ids.shape[1] > past_length:
677
+ remove_prefix_length = past_length
678
+ else:
679
+ # Default to old behavior: keep only final ID
680
+ remove_prefix_length = input_ids.shape[1] - 1
681
+
682
+ input_ids = input_ids[:, remove_prefix_length:]
683
+
684
+ if inputs_embeds is not None and past_key_values is None:
685
+ model_inputs = {"inputs_embeds": inputs_embeds}
686
+ else:
687
+ model_inputs = {"input_ids": input_ids}
688
+
689
+ model_inputs.update(
690
+ {
691
+ "attention_mask": attention_mask,
692
+ "past_key_values": past_key_values,
693
+ "use_cache": kwargs.get("use_cache"),
694
+ }
695
+ )
696
+
697
+ return model_inputs
698
+
699
+ @staticmethod
700
+ def _reorder_cache(past_key_values, beam_idx):
701
+ reordered_past = ()
702
+ for layer_past in past_key_values:
703
+ reordered_past += (
704
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
705
+ )
706
+ return reordered_past
707
+
708
+
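Because the labels are shifted inside `forward`, passing `labels=input_ids` directly yields the usual next-token loss, and `generate()` drives `prepare_inputs_for_generation` so that only the newest token is fed once `past_key_values` exist. A minimal usage sketch, assuming the public `microsoft/biogpt` checkpoint:

```python
import torch
from transformers import AutoTokenizer, BioGptForCausalLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")

# labels are shifted internally, so labels=input_ids gives the standard LM loss
loss = model(**inputs, labels=inputs["input_ids"]).loss

with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=20)
print(loss.item())
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```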
709
+ @add_start_docstrings(
710
+ """
711
+ BioGPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
712
+ Named-Entity-Recognition (NER) tasks.
713
+ """,
714
+ BIOGPT_START_DOCSTRING,
715
+ )
716
+ class BioGptForTokenClassification(BioGptPreTrainedModel):
717
+ def __init__(self, config):
718
+ super().__init__(config)
719
+ self.num_labels = config.num_labels
720
+
721
+ self.biogpt = BioGptModel(config)
722
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
723
+ classifier_dropout = config.classifier_dropout
724
+ else:
725
+ classifier_dropout = config.hidden_dropout_prob
726
+ self.dropout = nn.Dropout(classifier_dropout)
727
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
728
+
729
+ self.post_init()
730
+
731
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING)
732
+ @add_code_sample_docstrings(
733
+ checkpoint=_CHECKPOINT_FOR_DOC,
734
+ output_type=TokenClassifierOutput,
735
+ config_class=_CONFIG_FOR_DOC,
736
+ )
737
+ def forward(
738
+ self,
739
+ input_ids: Optional[torch.LongTensor] = None,
740
+ token_type_ids: Optional[torch.LongTensor] = None,
741
+ attention_mask: Optional[torch.FloatTensor] = None,
742
+ head_mask: Optional[torch.FloatTensor] = None,
743
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
744
+ inputs_embeds: Optional[torch.FloatTensor] = None,
745
+ labels: Optional[torch.LongTensor] = None,
746
+ use_cache: Optional[bool] = None,
747
+ output_attentions: Optional[bool] = None,
748
+ output_hidden_states: Optional[bool] = None,
749
+ return_dict: Optional[bool] = None,
750
+ ) -> Union[Tuple, TokenClassifierOutput]:
751
+ r"""
752
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
753
+ Labels for computing the token classification loss. Indices should be in
754
+ `[0, ..., config.num_labels - 1]`. All labels set to `-100` are ignored (masked); the loss is only
755
+ computed for labels in `[0, ..., config.num_labels - 1]`.
756
+ """
757
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
758
+
759
+ transformer_outputs = self.biogpt(
760
+ input_ids,
761
+ past_key_values=past_key_values,
762
+ attention_mask=attention_mask,
763
+ head_mask=head_mask,
764
+ inputs_embeds=inputs_embeds,
765
+ use_cache=use_cache,
766
+ output_attentions=output_attentions,
767
+ output_hidden_states=output_hidden_states,
768
+ return_dict=return_dict,
769
+ )
770
+
771
+ hidden_states = transformer_outputs[0]
772
+ hidden_states = self.dropout(hidden_states)
773
+ logits = self.classifier(hidden_states)
774
+
775
+ loss = None
776
+ if labels is not None:
777
+ loss_fct = CrossEntropyLoss()
778
+ # Only keep active parts of the loss
779
+ if attention_mask is not None:
780
+ active_loss = attention_mask.view(-1) == 1
781
+ active_logits = logits.view(-1, self.num_labels)
782
+ active_labels = torch.where(
783
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
784
+ )
785
+ loss = loss_fct(active_logits, active_labels)
786
+ else:
787
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
788
+
789
+ if not return_dict:
790
+ output = (logits,) + transformer_outputs[2:]
791
+ return ((loss,) + output) if loss is not None else output
792
+
793
+ return TokenClassifierOutput(
794
+ loss=loss,
795
+ logits=logits,
796
+ hidden_states=transformer_outputs.hidden_states,
797
+ attentions=transformer_outputs.attentions,
798
+ )
799
+
800
+
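In the token-classification head above, positions where `attention_mask == 0` have their labels replaced by the loss function's `ignore_index`, so padded tokens do not contribute to the loss. A small sketch with a randomly initialized toy model (sizes and label count are illustrative):

```python
import torch
from transformers import BioGptConfig, BioGptForTokenClassification

config = BioGptConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128, num_labels=5)
model = BioGptForTokenClassification(config)

input_ids = torch.randint(0, config.vocab_size, (2, 10))
attention_mask = torch.ones_like(input_ids)
attention_mask[:, 7:] = 0  # pretend the last three positions are padding
labels = torch.randint(0, config.num_labels, (2, 10))

outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
print(outputs.loss, outputs.logits.shape)  # logits: (batch, seq_len, num_labels) == (2, 10, 5)
```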
801
+ @add_start_docstrings(
802
+ """
803
+ The BioGpt Model transformer with a sequence classification head on top (linear layer).
804
+
805
+ [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
806
+ (e.g. GPT-2) do.
807
+
808
+ Since it does classification on the last token, it is required to know the position of the last token. If a
809
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
810
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
811
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
812
+ each row of the batch).
813
+ """,
814
+ BIOGPT_START_DOCSTRING,
815
+ )
816
+ class BioGptForSequenceClassification(BioGptPreTrainedModel):
817
+ def __init__(self, config: BioGptConfig):
818
+ super().__init__(config)
819
+ self.num_labels = config.num_labels
820
+ self.biogpt = BioGptModel(config)
821
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
822
+
823
+ # Initialize weights and apply final processing
824
+ self.post_init()
825
+
826
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING)
827
+ @add_code_sample_docstrings(
828
+ checkpoint=_CHECKPOINT_FOR_DOC,
829
+ output_type=SequenceClassifierOutputWithPast,
830
+ config_class=_CONFIG_FOR_DOC,
831
+ )
832
+ def forward(
833
+ self,
834
+ input_ids: Optional[torch.LongTensor] = None,
835
+ attention_mask: Optional[torch.FloatTensor] = None,
836
+ head_mask: Optional[torch.FloatTensor] = None,
837
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
838
+ inputs_embeds: Optional[torch.FloatTensor] = None,
839
+ labels: Optional[torch.LongTensor] = None,
840
+ use_cache: Optional[bool] = None,
841
+ output_attentions: Optional[bool] = None,
842
+ output_hidden_states: Optional[bool] = None,
843
+ return_dict: Optional[bool] = None,
844
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
845
+ r"""
846
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
847
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
848
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
849
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
850
+ """
851
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
852
+
853
+ transformer_outputs = self.biogpt(
854
+ input_ids,
855
+ past_key_values=past_key_values,
856
+ attention_mask=attention_mask,
857
+ head_mask=head_mask,
858
+ inputs_embeds=inputs_embeds,
859
+ use_cache=use_cache,
860
+ output_attentions=output_attentions,
861
+ output_hidden_states=output_hidden_states,
862
+ return_dict=return_dict,
863
+ )
864
+ hidden_states = transformer_outputs[0]
865
+ logits = self.score(hidden_states)
866
+
867
+ if input_ids is not None:
868
+ batch_size, sequence_length = input_ids.shape[:2]
869
+ else:
870
+ batch_size, sequence_length = inputs_embeds.shape[:2]
871
+
872
+ if self.config.pad_token_id is None:
873
+ sequence_length = -1
874
+ else:
875
+ if input_ids is not None:
876
+ sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
877
+ else:
878
+ sequence_length = -1
879
+ logger.warning(
880
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
881
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
882
+ )
883
+
884
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length]
885
+
886
+ loss = None
887
+ if labels is not None:
888
+ if self.config.problem_type is None:
889
+ if self.num_labels == 1:
890
+ self.config.problem_type = "regression"
891
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
892
+ self.config.problem_type = "single_label_classification"
893
+ else:
894
+ self.config.problem_type = "multi_label_classification"
895
+
896
+ if self.config.problem_type == "regression":
897
+ loss_fct = MSELoss()
898
+ if self.num_labels == 1:
899
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
900
+ else:
901
+ loss = loss_fct(pooled_logits, labels)
902
+ elif self.config.problem_type == "single_label_classification":
903
+ loss_fct = CrossEntropyLoss()
904
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
905
+ elif self.config.problem_type == "multi_label_classification":
906
+ loss_fct = BCEWithLogitsLoss()
907
+ loss = loss_fct(pooled_logits, labels)
908
+ if not return_dict:
909
+ output = (pooled_logits,) + transformer_outputs[1:]
910
+ return ((loss,) + output) if loss is not None else output
911
+
912
+ return SequenceClassifierOutputWithPast(
913
+ loss=loss,
914
+ logits=pooled_logits,
915
+ past_key_values=transformer_outputs.past_key_values,
916
+ hidden_states=transformer_outputs.hidden_states,
917
+ attentions=transformer_outputs.attentions,
918
+ )
919
+
920
+ def get_input_embeddings(self):
921
+ return self.biogpt.embed_tokens
922
+
923
+ def set_input_embeddings(self, value):
924
+ self.biogpt.embed_tokens = value
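The sequence-classification pooling above selects, for each row, the logit at the last non-padding position (falling back to index `-1` when no `pad_token_id` is configured). The index computation can be checked in isolation:

```python
import torch

pad_token_id = 1
input_ids = torch.tensor([[5, 8, 9, 1, 1],
                          [7, 2, 4, 6, 3]])

# index of the last non-padding token in each row, as in the forward pass above
last_token_index = torch.ne(input_ids, pad_token_id).sum(-1) - 1
print(last_token_index)  # tensor([2, 4])
```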
llmeval-env/lib/python3.10/site-packages/transformers/models/biogpt/tokenization_biogpt.py ADDED
@@ -0,0 +1,357 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for BioGPT."""
16
+ import json
17
+ import os
18
+ from typing import List, Optional, Tuple
19
+
20
+ from ...tokenization_utils import PreTrainedTokenizer
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ VOCAB_FILES_NAMES = {
27
+ "vocab_file": "vocab.json",
28
+ "merges_file": "merges.txt",
29
+ }
30
+
31
+
32
+ def get_pairs(word):
33
+ """
34
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
35
+ strings)
36
+ """
37
+ pairs = set()
38
+ prev_char = word[0]
39
+ for char in word[1:]:
40
+ pairs.add((prev_char, char))
41
+ prev_char = char
42
+ return pairs
43
+
44
+
45
+ class BioGptTokenizer(PreTrainedTokenizer):
46
+ """
47
+ Construct a FAIRSEQ Transformer tokenizer. Moses tokenization followed by Byte-Pair Encoding.
48
+
49
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
50
+ this superclass for more information regarding those methods.
51
+
52
+ Args:
53
+ vocab_file (`str`):
54
+ Path to the vocabulary file.
55
+ merges_file (`str`):
56
+ Merges file.
57
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
58
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
59
+ token instead.
60
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
61
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
62
+
63
+ <Tip>
64
+
65
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
66
+ sequence. The token used is the `cls_token`.
67
+
68
+ </Tip>
69
+
70
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
71
+ The end of sequence token.
72
+
73
+ <Tip>
74
+
75
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
76
+ The token used is the `sep_token`.
77
+
78
+ </Tip>
79
+
80
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
81
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
82
+ sequence classification or for a text and a question for question answering. It is also used as the last
83
+ token of a sequence built with special tokens.
84
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
85
+ The token used for padding, for example when batching sequences of different lengths.
86
+ """
87
+
88
+ vocab_files_names = VOCAB_FILES_NAMES
89
+ model_input_names = ["input_ids", "attention_mask"]
90
+
91
+ def __init__(
92
+ self,
93
+ vocab_file,
94
+ merges_file,
95
+ unk_token="<unk>",
96
+ bos_token="<s>",
97
+ eos_token="</s>",
98
+ sep_token="</s>",
99
+ pad_token="<pad>",
100
+ **kwargs,
101
+ ):
102
+ try:
103
+ import sacremoses
104
+ except ImportError:
105
+ raise ImportError(
106
+ "You need to install sacremoses to use BioGptTokenizer. "
107
+ "See https://pypi.org/project/sacremoses/ for installation."
108
+ )
109
+
110
+ self.lang = "en"
111
+ self.sm = sacremoses
112
+ # cache of sm.MosesTokenizer instance
113
+ self.cache_moses_tokenizer = {}
114
+ self.cache_moses_detokenizer = {}
115
+
116
+ # Initialize the vocabulary and BPE merge ranks
117
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
118
+ self.encoder = json.load(vocab_handle)
119
+ self.decoder = {v: k for k, v in self.encoder.items()}
120
+ with open(merges_file, encoding="utf-8") as merges_handle:
121
+ merges = merges_handle.read().split("\n")[:-1]
122
+ merges = [tuple(merge.split()[:2]) for merge in merges]
123
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
124
+ self.cache = {}
125
+
126
+ super().__init__(
127
+ bos_token=bos_token,
128
+ eos_token=eos_token,
129
+ sep_token=sep_token,
130
+ unk_token=unk_token,
131
+ pad_token=pad_token,
132
+ **kwargs,
133
+ )
134
+
135
+ @property
136
+ def vocab_size(self):
137
+ """Returns vocab size"""
138
+ return len(self.encoder)
139
+
140
+ def get_vocab(self):
141
+ return dict(self.encoder, **self.added_tokens_encoder)
142
+
143
+ def moses_tokenize(self, text, lang):
144
+ if lang not in self.cache_moses_tokenizer:
145
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
146
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
147
+ return self.cache_moses_tokenizer[lang].tokenize(
148
+ text, aggressive_dash_splits=True, return_str=False, escape=True
149
+ )
150
+
151
+ def moses_detokenize(self, tokens, lang):
152
+ if lang not in self.cache_moses_detokenizer:
153
+ moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
154
+ self.cache_moses_detokenizer[lang] = moses_detokenizer
155
+ return self.cache_moses_detokenizer[lang].detokenize(tokens)
156
+
157
+ def bpe(self, token):
158
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
159
+ if token in self.cache:
160
+ return self.cache[token]
161
+ pairs = get_pairs(word)
162
+
163
+ if not pairs:
164
+ return token + "</w>"
165
+
166
+ while True:
167
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
168
+ if bigram not in self.bpe_ranks:
169
+ break
170
+ first, second = bigram
171
+ new_word = []
172
+ i = 0
173
+ while i < len(word):
174
+ try:
175
+ j = word.index(first, i)
176
+ except ValueError:
177
+ new_word.extend(word[i:])
178
+ break
179
+ else:
180
+ new_word.extend(word[i:j])
181
+ i = j
182
+
183
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
184
+ new_word.append(first + second)
185
+ i += 2
186
+ else:
187
+ new_word.append(word[i])
188
+ i += 1
189
+ new_word = tuple(new_word)
190
+ word = new_word
191
+ if len(word) == 1:
192
+ break
193
+ else:
194
+ pairs = get_pairs(word)
195
+ word = " ".join(word)
196
+ if word == "\n </w>":
197
+ word = "\n</w>"
198
+ self.cache[token] = word
199
+ return word
200
+
201
+ def _tokenize(self, text, bypass_tokenizer=False):
202
+ """Returns a tokenized string."""
203
+ if bypass_tokenizer:
204
+ text = text.split()
205
+ else:
206
+ text = self.moses_tokenize(text, self.lang)
207
+
208
+ split_tokens = []
209
+ for token in text:
210
+ if token:
211
+ split_tokens.extend(list(self.bpe(token).split(" ")))
212
+
213
+ return split_tokens
214
+
215
+ def _convert_token_to_id(self, token):
216
+ """Converts a token (str) in an id using the vocab."""
217
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
218
+
219
+ def _convert_id_to_token(self, index):
220
+ """Converts an index (integer) in a token (str) using the vocab."""
221
+ return self.decoder.get(index, self.unk_token)
222
+
223
+ def convert_tokens_to_string(self, tokens):
224
+ """Converts a sequence of tokens (string) in a single string."""
225
+ # remove BPE
226
+ tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
227
+ tokens = "".join(tokens).split()
228
+ # detokenize
229
+ text = self.moses_detokenize(tokens, self.lang)
230
+ return text
231
+
232
+ def build_inputs_with_special_tokens(
233
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
234
+ ) -> List[int]:
235
+ """
236
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
237
+ adding special tokens. A BioGPT sequence has the following format:
238
+
239
+ - single sequence: `</s> X `
240
+ - pair of sequences: `</s> A </s> B `
241
+
242
+ Args:
243
+ token_ids_0 (`List[int]`):
244
+ List of IDs to which the special tokens will be added.
245
+ token_ids_1 (`List[int]`, *optional*):
246
+ Optional second list of IDs for sequence pairs.
247
+
248
+ Returns:
249
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
250
+ """
251
+ if token_ids_1 is None:
252
+ return [self.sep_token_id] + token_ids_0
253
+ sep = [self.sep_token_id]
254
+ return sep + token_ids_0 + sep + token_ids_1
255
+
256
+ def get_special_tokens_mask(
257
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
258
+ ) -> List[int]:
259
+ """
260
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
261
+ special tokens using the tokenizer `prepare_for_model` method.
262
+
263
+ Args:
264
+ token_ids_0 (`List[int]`):
265
+ List of IDs.
266
+ token_ids_1 (`List[int]`, *optional*):
267
+ Optional second list of IDs for sequence pairs.
268
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
269
+ Whether or not the token list is already formatted with special tokens for the model.
270
+
271
+ Returns:
272
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
273
+ """
274
+ if already_has_special_tokens:
275
+ return super().get_special_tokens_mask(
276
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
277
+ )
278
+ # no bos used in fairseq
279
+ if token_ids_1 is not None:
280
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
281
+ return [1] + ([0] * len(token_ids_0))
282
+
283
+ def create_token_type_ids_from_sequences(
284
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
285
+ ) -> List[int]:
286
+ """
287
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
288
+ Transformer sequence pair mask has the following format:
289
+
290
+ ```
291
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
292
+ | first sequence | second sequence |
293
+ ```
294
+
295
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
296
+
297
+ Args:
298
+ token_ids_0 (`List[int]`):
299
+ List of IDs.
300
+ token_ids_1 (`List[int]`, *optional*):
301
+ Optional second list of IDs for sequence pairs.
302
+
303
+ Returns:
304
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
305
+ """
306
+ sep = [self.sep_token_id]
307
+
308
+ # no bos used in fairseq
309
+ if token_ids_1 is None:
310
+ return len(token_ids_0 + sep) * [0]
311
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
312
+
313
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
314
+ if not os.path.isdir(save_directory):
315
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
316
+ return
317
+ vocab_file = os.path.join(
318
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
319
+ )
320
+ merge_file = os.path.join(
321
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
322
+ )
323
+
324
+ with open(vocab_file, "w", encoding="utf-8") as f:
325
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
326
+
327
+ index = 0
328
+ with open(merge_file, "w", encoding="utf-8") as writer:
329
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
330
+ if index != token_index:
331
+ logger.warning(
332
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
333
+ " Please check that the tokenizer is not corrupted!"
334
+ )
335
+ index = token_index
336
+ writer.write(" ".join(bpe_tokens) + "\n")
337
+ index += 1
338
+
339
+ return vocab_file, merge_file
340
+
341
+ def __getstate__(self):
342
+ state = self.__dict__.copy()
343
+ state["sm"] = None
344
+ return state
345
+
346
+ def __setstate__(self, d):
347
+ self.__dict__ = d
348
+
349
+ try:
350
+ import sacremoses
351
+ except ImportError:
352
+ raise ImportError(
353
+ "You need to install sacremoses to use BioGptTokenizer. "
354
+ "See https://pypi.org/project/sacremoses/ for installation."
355
+ )
356
+
357
+ self.sm = sacremoses
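End to end, `_tokenize` runs Moses tokenization followed by BPE (marking word ends with `</w>`), `build_inputs_with_special_tokens` prepends `</s>`, and `convert_tokens_to_string` undoes both steps. A short usage sketch; the `microsoft/biogpt` checkpoint name is assumed to host the vocab and merges files:

```python
from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

tokens = tokenizer.tokenize("Aspirin reduces fever.")  # Moses tokenization, then BPE
encoding = tokenizer("Aspirin reduces fever.")         # prepends the </s> separator token
print(tokens)
print(encoding["input_ids"])
print(tokenizer.decode(encoding["input_ids"], skip_special_tokens=True))
```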
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__init__.py ADDED
@@ -0,0 +1,135 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
22
+ "configuration_data2vec_text": [
23
+ "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
24
+ "Data2VecTextConfig",
25
+ "Data2VecTextOnnxConfig",
26
+ ],
27
+ "configuration_data2vec_vision": [
28
+ "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
29
+ "Data2VecVisionConfig",
30
+ "Data2VecVisionOnnxConfig",
31
+ ],
32
+ }
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_data2vec_audio"] = [
41
+ "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
42
+ "Data2VecAudioForAudioFrameClassification",
43
+ "Data2VecAudioForCTC",
44
+ "Data2VecAudioForSequenceClassification",
45
+ "Data2VecAudioForXVector",
46
+ "Data2VecAudioModel",
47
+ "Data2VecAudioPreTrainedModel",
48
+ ]
49
+ _import_structure["modeling_data2vec_text"] = [
50
+ "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "Data2VecTextForCausalLM",
52
+ "Data2VecTextForMaskedLM",
53
+ "Data2VecTextForMultipleChoice",
54
+ "Data2VecTextForQuestionAnswering",
55
+ "Data2VecTextForSequenceClassification",
56
+ "Data2VecTextForTokenClassification",
57
+ "Data2VecTextModel",
58
+ "Data2VecTextPreTrainedModel",
59
+ ]
60
+ _import_structure["modeling_data2vec_vision"] = [
61
+ "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
62
+ "Data2VecVisionForImageClassification",
63
+ "Data2VecVisionForMaskedImageModeling",
64
+ "Data2VecVisionForSemanticSegmentation",
65
+ "Data2VecVisionModel",
66
+ "Data2VecVisionPreTrainedModel",
67
+ ]
68
+
69
+ if is_tf_available():
70
+ _import_structure["modeling_tf_data2vec_vision"] = [
71
+ "TFData2VecVisionForImageClassification",
72
+ "TFData2VecVisionForSemanticSegmentation",
73
+ "TFData2VecVisionModel",
74
+ "TFData2VecVisionPreTrainedModel",
75
+ ]
76
+
77
+ if TYPE_CHECKING:
78
+ from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
79
+ from .configuration_data2vec_text import (
80
+ DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
81
+ Data2VecTextConfig,
82
+ Data2VecTextOnnxConfig,
83
+ )
84
+ from .configuration_data2vec_vision import (
85
+ DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
86
+ Data2VecVisionConfig,
87
+ Data2VecVisionOnnxConfig,
88
+ )
89
+
90
+ try:
91
+ if not is_torch_available():
92
+ raise OptionalDependencyNotAvailable()
93
+ except OptionalDependencyNotAvailable:
94
+ pass
95
+ else:
96
+ from .modeling_data2vec_audio import (
97
+ DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
98
+ Data2VecAudioForAudioFrameClassification,
99
+ Data2VecAudioForCTC,
100
+ Data2VecAudioForSequenceClassification,
101
+ Data2VecAudioForXVector,
102
+ Data2VecAudioModel,
103
+ Data2VecAudioPreTrainedModel,
104
+ )
105
+ from .modeling_data2vec_text import (
106
+ DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
107
+ Data2VecTextForCausalLM,
108
+ Data2VecTextForMaskedLM,
109
+ Data2VecTextForMultipleChoice,
110
+ Data2VecTextForQuestionAnswering,
111
+ Data2VecTextForSequenceClassification,
112
+ Data2VecTextForTokenClassification,
113
+ Data2VecTextModel,
114
+ Data2VecTextPreTrainedModel,
115
+ )
116
+ from .modeling_data2vec_vision import (
117
+ DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
118
+ Data2VecVisionForImageClassification,
119
+ Data2VecVisionForMaskedImageModeling,
120
+ Data2VecVisionForSemanticSegmentation,
121
+ Data2VecVisionModel,
122
+ Data2VecVisionPreTrainedModel,
123
+ )
124
+ if is_tf_available():
125
+ from .modeling_tf_data2vec_vision import (
126
+ TFData2VecVisionForImageClassification,
127
+ TFData2VecVisionForSemanticSegmentation,
128
+ TFData2VecVisionModel,
129
+ TFData2VecVisionPreTrainedModel,
130
+ )
131
+
132
+ else:
133
+ import sys
134
+
135
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
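The module ends by replacing itself with a `_LazyModule`, so the heavy PyTorch/TensorFlow submodules listed in `_import_structure` are only imported when one of their attributes is first accessed; the `TYPE_CHECKING` branch exposes the same names to static type checkers. A small sketch of the observable behaviour (printing the wrapper's class name):

```python
import importlib

data2vec = importlib.import_module("transformers.models.data2vec")
print(type(data2vec).__name__)  # _LazyModule

# Attribute access triggers the real import of configuration_data2vec_text.
config_cls = data2vec.Data2VecTextConfig
print(config_cls.__name__, config_cls.model_type)  # Data2VecTextConfig data2vec-text
```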
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.48 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_audio.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_text.cpython-310.pyc ADDED
Binary file (6.72 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_vision.cpython-310.pyc ADDED
Binary file (8.12 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (7.48 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.14 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.42 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_audio.cpython-310.pyc ADDED
Binary file (41 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_text.cpython-310.pyc ADDED
Binary file (45.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_vision.cpython-310.pyc ADDED
Binary file (38.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_tf_data2vec_vision.cpython-310.pyc ADDED
Binary file (52.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_audio.py ADDED
@@ -0,0 +1,285 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Data2VecAudio configuration"""
16
+
17
+ import math
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class Data2VecAudioConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`Data2VecAudioModel`]. It is used to instantiate
29
+ a Data2VecAudio model according to the specified arguments, defining the model architecture. Instantiating a
30
+ configuration with the defaults will yield a similar configuration to that of the Data2VecAudio
31
+ [facebook/data2vec-audio-base-960h](https://huggingface.co/facebook/data2vec-audio-base-960h) architecture.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 32):
39
+ Vocabulary size of the Data2VecAudio model. Defines the number of different tokens that can be represented
40
+ by the `inputs_ids` passed when calling [`Data2VecAudioModel`].
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_hidden_layers (`int`, *optional*, defaults to 12):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 12):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 3072):
50
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
51
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
54
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
55
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
56
+ activation_dropout (`float`, *optional*, defaults to 0.1):
57
+ The dropout ratio for activations inside the fully connected layer.
58
+ attention_dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention probabilities.
60
+ final_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for the final projection layer of [`Data2VecAudioForCTC`].
62
+ layerdrop (`float`, *optional*, defaults to 0.1):
63
+ The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more
64
+ details.
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
70
+ The dropout probability for output of the feature encoder.
71
+ feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
72
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
73
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
74
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
75
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
76
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
77
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
78
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
79
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
80
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
81
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
82
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
83
+ *conv_dim*.
84
+ conv_bias (`bool`, *optional*, defaults to `False`):
85
+ Whether the 1D convolutional layers have a bias.
86
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 5):
87
+ Number of 1D convolutional positional embedding layers.
88
+ conv_pos_kernel_size (`int`, *optional*, defaults to 19):
+ Kernel size of each 1D convolutional positional embedding layer.
89
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
90
+ Number of groups of 1D convolutional positional embeddings layer.
91
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
92
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
93
+ procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
94
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
95
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
+ actual percentage of masked vectors.
96
+ mask_time_length (`int`, *optional*, defaults to 10):
97
+ Length of vector span along the time axis.
98
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
99
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
100
+ irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
101
+ mask_time_min_masks''
102
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
103
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
104
+ masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks over
104
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
106
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
107
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
108
+ True`.
109
+ mask_feature_length (`int`, *optional*, defaults to 10):
110
+ Length of vector span along the feature axis.
111
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
112
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
113
+ step, irrespectively of `mask_feature_prob`. Only relevant if
114
+ ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
115
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
116
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
117
+ instance of [`Data2VecAudioForCTC`].
118
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
119
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
120
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
121
+ of [`Data2VecAudioForCTC`].
122
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
123
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
124
+ instance of [`Data2VecAudioForSequenceClassification`].
125
+ classifier_proj_size (`int`, *optional*, defaults to 256):
126
+ Dimensionality of the projection before token mean-pooling for classification.
127
+ tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
128
+ A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
129
+ module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
130
+ tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
131
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
132
+ *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
133
+ tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
134
+ A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
135
+ *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
136
+ xvector_output_dim (`int`, *optional*, defaults to 512):
137
+ Dimensionality of the *XVector* embedding vectors.
138
+ add_adapter (`bool`, *optional*, defaults to `False`):
139
+ Whether a convolutional network should be stacked on top of the Data2VecAudio Encoder. Can be very useful
140
+ for warm-starting Data2VecAudio for SpeechEncoderDecoder models.
141
+ adapter_kernel_size (`int`, *optional*, defaults to 3):
142
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
143
+ adapter_stride (`int`, *optional*, defaults to 2):
144
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
145
+ num_adapter_layers (`int`, *optional*, defaults to 3):
146
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
147
+ True`.
148
+ output_hidden_size (`int`, *optional*):
149
+ Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant
150
+ if `add_adapter is True`.
151
+
152
+ Example:
153
+
154
+ ```python
155
+ >>> from transformers import Data2VecAudioConfig, Data2VecAudioModel
156
+
157
+ >>> # Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration
158
+ >>> configuration = Data2VecAudioConfig()
159
+
160
+ >>> # Initializing a model (with random weights) from the facebook/data2vec-audio-base-960h style configuration
161
+ >>> model = Data2VecAudioModel(configuration)
162
+
163
+ >>> # Accessing the model configuration
164
+ >>> configuration = model.config
165
+ ```"""
166
+
167
+ model_type = "data2vec-audio"
168
+
169
+ def __init__(
170
+ self,
171
+ vocab_size=32,
172
+ hidden_size=768,
173
+ num_hidden_layers=12,
174
+ num_attention_heads=12,
175
+ intermediate_size=3072,
176
+ hidden_act="gelu",
177
+ hidden_dropout=0.1,
178
+ activation_dropout=0.1,
179
+ attention_dropout=0.1,
180
+ feat_proj_dropout=0.0,
181
+ final_dropout=0.1,
182
+ layerdrop=0.1,
183
+ initializer_range=0.02,
184
+ layer_norm_eps=1e-5,
185
+ feat_extract_activation="gelu",
186
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
187
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
188
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
189
+ conv_bias=False,
190
+ num_conv_pos_embedding_groups=16,
191
+ conv_pos_kernel_size=19,
192
+ num_conv_pos_embeddings=5,
193
+ mask_time_prob=0.05,
194
+ mask_time_length=10,
195
+ mask_time_min_masks=2,
196
+ mask_feature_prob=0.0,
197
+ mask_feature_length=10,
198
+ mask_feature_min_masks=0,
199
+ ctc_loss_reduction="sum",
200
+ ctc_zero_infinity=False,
201
+ use_weighted_layer_sum=False,
202
+ classifier_proj_size=256,
203
+ tdnn_dim=(512, 512, 512, 512, 1500),
204
+ tdnn_kernel=(5, 3, 3, 1, 1),
205
+ tdnn_dilation=(1, 2, 3, 1, 1),
206
+ xvector_output_dim=512,
207
+ pad_token_id=0,
208
+ bos_token_id=1,
209
+ eos_token_id=2,
210
+ add_adapter=False,
211
+ adapter_kernel_size=3,
212
+ adapter_stride=2,
213
+ num_adapter_layers=3,
214
+ output_hidden_size=None,
215
+ **kwargs,
216
+ ):
217
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
218
+ self.hidden_size = hidden_size
219
+ self.feat_extract_activation = feat_extract_activation
220
+ self.conv_dim = list(conv_dim)
221
+ self.conv_stride = list(conv_stride)
222
+ self.conv_kernel = list(conv_kernel)
223
+ self.conv_bias = conv_bias
224
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
225
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
226
+ self.conv_pos_kernel_size = conv_pos_kernel_size
227
+ self.num_feat_extract_layers = len(self.conv_dim)
228
+ self.num_hidden_layers = num_hidden_layers
229
+ self.intermediate_size = intermediate_size
230
+ self.hidden_act = hidden_act
231
+ self.num_attention_heads = num_attention_heads
232
+ self.hidden_dropout = hidden_dropout
233
+ self.attention_dropout = attention_dropout
234
+ self.activation_dropout = activation_dropout
235
+ self.feat_proj_dropout = feat_proj_dropout
236
+ self.final_dropout = final_dropout
237
+ self.layerdrop = layerdrop
238
+ self.layer_norm_eps = layer_norm_eps
239
+ self.initializer_range = initializer_range
240
+ self.vocab_size = vocab_size
241
+ self.use_weighted_layer_sum = use_weighted_layer_sum
242
+
243
+ if (
244
+ (len(self.conv_stride) != self.num_feat_extract_layers)
245
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
246
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
247
+ ):
248
+ raise ValueError(
249
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
250
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
251
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
252
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
253
+ )
254
+
255
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
256
+ self.mask_time_prob = mask_time_prob
257
+ self.mask_time_length = mask_time_length
258
+ self.mask_time_min_masks = mask_time_min_masks
259
+ self.mask_feature_prob = mask_feature_prob
260
+ self.mask_feature_length = mask_feature_length
261
+ self.mask_feature_min_masks = mask_feature_min_masks
262
+
263
+ # ctc loss
264
+ self.ctc_loss_reduction = ctc_loss_reduction
265
+ self.ctc_zero_infinity = ctc_zero_infinity
266
+
267
+ # adapter
268
+ self.add_adapter = add_adapter
269
+ self.adapter_kernel_size = adapter_kernel_size
270
+ self.adapter_stride = adapter_stride
271
+ self.num_adapter_layers = num_adapter_layers
272
+ self.output_hidden_size = output_hidden_size or hidden_size
273
+
274
+ # SequenceClassification-specific parameter. Feel free to ignore for other classes.
275
+ self.classifier_proj_size = classifier_proj_size
276
+
277
+ # XVector-specific parameters. Feel free to ignore for other classes.
278
+ self.tdnn_dim = list(tdnn_dim)
279
+ self.tdnn_kernel = list(tdnn_kernel)
280
+ self.tdnn_dilation = list(tdnn_dilation)
281
+ self.xvector_output_dim = xvector_output_dim
282
+
283
+ @property
284
+ def inputs_to_logits_ratio(self):
285
+ return math.prod(self.conv_stride)
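`inputs_to_logits_ratio` is simply the product of the feature-encoder strides, i.e. how many raw audio samples collapse into one encoder frame. With the defaults above that product is 5·2·2·2·2·2·2 = 320, roughly one frame every 20 ms of 16 kHz audio:

```python
from transformers import Data2VecAudioConfig

config = Data2VecAudioConfig()  # defaults mirror facebook/data2vec-audio-base-960h

print(config.conv_stride)                      # [5, 2, 2, 2, 2, 2, 2]
print(config.inputs_to_logits_ratio)           # 320 samples per output frame
print(16_000 / config.inputs_to_logits_ratio)  # 50.0 frames per second at 16 kHz
```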
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_text.py ADDED
@@ -0,0 +1,153 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Data2VecText configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class Data2VecTextConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`Data2VecTextModel`]. It
33
+ is used to instantiate a Data2VecText model according to the specified arguments, defining the model architecture.
34
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecText
35
+ [facebook/data2vec-text-base](https://huggingface.co/facebook/data2vec-text-base) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 30522):
43
+ Vocabulary size of the Data2VecText model. Defines the number of different tokens that can be represented by
44
+ the `inputs_ids` passed when calling [`Data2VecTextModel`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ intermediate_size (`int`, *optional*, defaults to 3072):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
53
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention probabilities.
60
+ max_position_embeddings (`int`, *optional*, defaults to 512):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ type_vocab_size (`int`, *optional*, defaults to 2):
64
+ The vocabulary size of the `token_type_ids` passed when calling [`Data2VecTextModel`].
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
70
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
71
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
72
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
73
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
74
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
75
+ is_decoder (`bool`, *optional*, defaults to `False`):
76
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
77
+ use_cache (`bool`, *optional*, defaults to `True`):
78
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
79
+ relevant if `config.is_decoder=True`.
80
+ classifier_dropout (`float`, *optional*):
81
+ The dropout ratio for the classification head.
82
+
83
+ Examples:
84
+
85
+ ```python
86
+ >>> from transformers import Data2VecTextConfig, Data2VecTextModel
87
+
88
+ >>> # Initializing a Data2VecText facebook/data2vec-text-base style configuration
89
+ >>> configuration = Data2VecTextConfig()
90
+
91
+ >>> # Initializing a model (with random weights) from the facebook/data2vec-text-base style configuration
92
+ >>> model = Data2VecTextModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "data2vec-text"
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_size=30522,
103
+ hidden_size=768,
104
+ num_hidden_layers=12,
105
+ num_attention_heads=12,
106
+ intermediate_size=3072,
107
+ hidden_act="gelu",
108
+ hidden_dropout_prob=0.1,
109
+ attention_probs_dropout_prob=0.1,
110
+ max_position_embeddings=512,
111
+ type_vocab_size=2,
112
+ initializer_range=0.02,
113
+ layer_norm_eps=1e-12,
114
+ pad_token_id=1,
115
+ bos_token_id=0,
116
+ eos_token_id=2,
117
+ position_embedding_type="absolute",
118
+ use_cache=True,
119
+ classifier_dropout=None,
120
+ **kwargs,
121
+ ):
122
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
123
+
124
+ self.vocab_size = vocab_size
125
+ self.hidden_size = hidden_size
126
+ self.num_hidden_layers = num_hidden_layers
127
+ self.num_attention_heads = num_attention_heads
128
+ self.hidden_act = hidden_act
129
+ self.intermediate_size = intermediate_size
130
+ self.hidden_dropout_prob = hidden_dropout_prob
131
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
132
+ self.max_position_embeddings = max_position_embeddings
133
+ self.type_vocab_size = type_vocab_size
134
+ self.initializer_range = initializer_range
135
+ self.layer_norm_eps = layer_norm_eps
136
+ self.position_embedding_type = position_embedding_type
137
+ self.use_cache = use_cache
138
+ self.classifier_dropout = classifier_dropout
139
+
140
+
141
+ class Data2VecTextOnnxConfig(OnnxConfig):
142
+ @property
143
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
144
+ if self.task == "multiple-choice":
145
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
146
+ else:
147
+ dynamic_axis = {0: "batch", 1: "sequence"}
148
+ return OrderedDict(
149
+ [
150
+ ("input_ids", dynamic_axis),
151
+ ("attention_mask", dynamic_axis),
152
+ ]
153
+ )
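`Data2VecTextOnnxConfig` only declares the dynamic axes of the ONNX export inputs; the `multiple-choice` task adds a `choice` axis. A minimal sketch, assuming the standard `OnnxConfig(config, task=...)` constructor:

```python
from transformers import Data2VecTextConfig
from transformers.models.data2vec.configuration_data2vec_text import Data2VecTextOnnxConfig

config = Data2VecTextConfig()

onnx_config = Data2VecTextOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])

mc_onnx_config = Data2VecTextOnnxConfig(config, task="multiple-choice")
print(mc_onnx_config.inputs["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}
```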
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_vision.py ADDED
@@ -0,0 +1,193 @@
1
+ # coding=utf-8
2
+ # Copyright Meta Platforms and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Data2VecVision model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from packaging import version
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class Data2VecVisionConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`Data2VecVisionModel`]. It is used to instantiate
35
+ a Data2VecVision model according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the Data2VecVision
37
+ [facebook/data2vec-vision-base](https://huggingface.co/facebook/data2vec-vision-base) architecture.
38
+
39
+ Args:
40
+ hidden_size (`int`, *optional*, defaults to 768):
41
+ Dimensionality of the encoder layers and the pooler layer.
42
+ num_hidden_layers (`int`, *optional*, defaults to 12):
43
+ Number of hidden layers in the Transformer encoder.
44
+ num_attention_heads (`int`, *optional*, defaults to 12):
45
+ Number of attention heads for each attention layer in the Transformer encoder.
46
+ intermediate_size (`int`, *optional*, defaults to 3072):
47
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
48
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
49
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
50
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
51
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
52
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
53
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
54
+ The dropout ratio for the attention probabilities.
55
+ initializer_range (`float`, *optional*, defaults to 0.02):
56
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
57
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
58
+ The epsilon used by the layer normalization layers.
59
+ image_size (`int`, *optional*, defaults to 224):
60
+ The size (resolution) of each image.
61
+ patch_size (`int`, *optional*, defaults to 16):
62
+ The size (resolution) of each patch.
63
+ num_channels (`int`, *optional*, defaults to 3):
64
+ The number of input channels.
65
+ use_mask_token (`bool`, *optional*, defaults to `False`):
66
+ Whether to use a mask token for masked image modeling.
67
+ use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
68
+ Whether to use BERT-style absolute position embeddings.
69
+ use_relative_position_bias (`bool`, *optional*, defaults to `False`):
70
+ Whether to use T5-style relative position embeddings in the self-attention layers.
71
+ use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
72
+ Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
73
+ layer_scale_init_value (`float`, *optional*, defaults to 0.1):
74
+ Scale to use in the self-attention layers. Use 0.1 for the base model and 1e-5 for the large model. Set to 0 to disable layer scale.
75
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
76
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
77
+ use_mean_pooling (`bool`, *optional*, defaults to `True`):
78
+ Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
79
+ CLS token, before applying the classification head.
80
+ out_indices (`List[int]`, *optional*, defaults to `[3, 5, 7, 11]`):
81
+ Indices of the feature maps to use for semantic segmentation.
82
+ pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
83
+ Pooling scales used in Pooling Pyramid Module applied on the last feature map.
84
+ use_auxiliary_head (`bool`, *optional*, defaults to `True`):
85
+ Whether to use an auxiliary head during training.
86
+ auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
87
+ Weight of the cross-entropy loss of the auxiliary head.
88
+ auxiliary_channels (`int`, *optional*, defaults to 256):
89
+ Number of channels to use in the auxiliary head.
90
+ auxiliary_num_convs (`int`, *optional*, defaults to 1):
91
+ Number of convolutional layers to use in the auxiliary head.
92
+ auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
93
+ Whether to concatenate the output of the auxiliary head with the input before the classification layer.
94
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
95
+ The index that is ignored by the loss function of the semantic segmentation model.
96
+
97
+ Example:
98
+
99
+ ```python
100
+ >>> from transformers import Data2VecVisionConfig, Data2VecVisionModel
101
+
102
+ >>> # Initializing a Data2VecVision data2vec_vision-base-patch16-224-in22k style configuration
103
+ >>> configuration = Data2VecVisionConfig()
104
+
105
+ >>> # Initializing a model (with random weights) from the data2vec_vision-base-patch16-224-in22k style configuration
106
+ >>> model = Data2VecVisionModel(configuration)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> configuration = model.config
110
+ ```"""
111
+
112
+ model_type = "data2vec-vision"
113
+
114
+ def __init__(
115
+ self,
116
+ hidden_size=768,
117
+ num_hidden_layers=12,
118
+ num_attention_heads=12,
119
+ intermediate_size=3072,
120
+ hidden_act="gelu",
121
+ hidden_dropout_prob=0.0,
122
+ attention_probs_dropout_prob=0.0,
123
+ initializer_range=0.02,
124
+ layer_norm_eps=1e-12,
125
+ image_size=224,
126
+ patch_size=16,
127
+ num_channels=3,
128
+ use_mask_token=False,
129
+ use_absolute_position_embeddings=False,
130
+ use_relative_position_bias=False,
131
+ use_shared_relative_position_bias=False,
132
+ layer_scale_init_value=0.1,
133
+ drop_path_rate=0.1,
134
+ use_mean_pooling=True,
135
+ out_indices=[3, 5, 7, 11],
136
+ pool_scales=[1, 2, 3, 6],
137
+ use_auxiliary_head=True,
138
+ auxiliary_loss_weight=0.4,
139
+ auxiliary_channels=256,
140
+ auxiliary_num_convs=1,
141
+ auxiliary_concat_input=False,
142
+ semantic_loss_ignore_index=255,
143
+ **kwargs,
144
+ ):
145
+ super().__init__(**kwargs)
146
+
147
+ self.hidden_size = hidden_size
148
+ self.num_hidden_layers = num_hidden_layers
149
+ self.num_attention_heads = num_attention_heads
150
+ self.intermediate_size = intermediate_size
151
+ self.hidden_act = hidden_act
152
+ self.hidden_dropout_prob = hidden_dropout_prob
153
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
154
+ self.initializer_range = initializer_range
155
+ self.layer_norm_eps = layer_norm_eps
156
+
157
+ self.image_size = image_size
158
+ self.patch_size = patch_size
159
+ self.num_channels = num_channels
160
+ self.use_mask_token = use_mask_token
161
+ self.use_absolute_position_embeddings = use_absolute_position_embeddings
162
+ self.use_relative_position_bias = use_relative_position_bias
163
+ self.use_shared_relative_position_bias = use_shared_relative_position_bias
164
+ self.layer_scale_init_value = layer_scale_init_value
165
+ self.drop_path_rate = drop_path_rate
166
+ self.use_mean_pooling = use_mean_pooling
167
+ # decode head attributes (semantic segmentation)
168
+ self.out_indices = out_indices
169
+ self.pool_scales = pool_scales
170
+ # auxiliary head attributes (semantic segmentation)
171
+ self.use_auxiliary_head = use_auxiliary_head
172
+ self.auxiliary_loss_weight = auxiliary_loss_weight
173
+ self.auxiliary_channels = auxiliary_channels
174
+ self.auxiliary_num_convs = auxiliary_num_convs
175
+ self.auxiliary_concat_input = auxiliary_concat_input
176
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
177
+
178
+
179
+ # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
180
+ class Data2VecVisionOnnxConfig(OnnxConfig):
181
+ torch_onnx_minimum_version = version.parse("1.11")
182
+
183
+ @property
184
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
185
+ return OrderedDict(
186
+ [
187
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
188
+ ]
189
+ )
190
+
191
+ @property
192
+ def atol_for_validation(self) -> float:
193
+ return 1e-4
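As a minimal sketch (not part of the diff), the defaults above correspond to the base architecture; a large-style configuration can be obtained by overriding the same four fields that the vision conversion script further below sets for large checkpoints.

```python
# Minimal sketch: a "large"-style Data2VecVisionConfig built by overriding the defaults
# above, mirroring the overrides applied for large checkpoints in the conversion script.
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(
    hidden_size=1024,
    intermediate_size=4096,
    num_hidden_layers=24,
    num_attention_heads=16,
)
print(config.model_type)                      # "data2vec-vision"
print(config.image_size, config.patch_size)   # 224 16 (defaults kept)
```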
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,286 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Wav2Vec2 checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+ from functools import reduce
21
+
22
+ import fairseq
23
+ import torch
24
+ from datasets import load_dataset
25
+
26
+ from transformers import Wav2Vec2Processor, logging
27
+ from transformers.models.data2vec.configuration_data2vec_audio import Data2VecAudioConfig
28
+
29
+ # Copied from https://github.com/pytorch/fairseq/blob/main/examples/data2vec/models/data2vec_audio.py
30
+ from transformers.models.data2vec.data2vec_audio import Data2VecAudioModel as Dummy # noqa: F401
31
+ from transformers.models.data2vec.modeling_data2vec_audio import Data2VecAudioForCTC, Data2VecAudioModel
32
+
33
+
34
+ logging.set_verbosity_info()
35
+ logger = logging.get_logger(__name__)
36
+
37
+ MAPPING = {
38
+ "post_extract_proj": "feature_projection.projection",
39
+ "models.0.layer_norm": "feature_projection.layer_norm",
40
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
41
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
42
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
43
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
44
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
45
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
46
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
47
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
48
+ "encoder.layer_norm": "encoder.layer_norm",
49
+ "w2v_model.layer_norm": "feature_projection.layer_norm",
50
+ "w2v_encoder.proj": "lm_head",
51
+ "mask_emb": "masked_spec_embed",
52
+ }
53
+ TOP_LEVEL_KEYS = [
54
+ "lm_head",
55
+ ]
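The `*` placeholder in `MAPPING` stands for the encoder layer index, which `recursively_load_weights` below recovers from the fairseq key before substituting it into the Hugging Face key. A minimal sketch of that substitution follows; the fairseq key name used here is a hypothetical example of the naming scheme the script expects.

```python
# Minimal sketch of how the "*" wildcard in MAPPING is resolved (see
# recursively_load_weights below). The fairseq key is a hypothetical example.
fairseq_key = "encoder.layers.3.self_attn.k_proj.weight"  # hypothetical fairseq key
key = "self_attn.k_proj"                                   # entry from MAPPING
mapped_key = "encoder.layers.*.attention.k_proj"

if key in fairseq_key:
    # Same parsing as in recursively_load_weights: the segment before the matched key
    # ends with "...layers.<index>.", so the index is the second-to-last piece.
    layer_index = fairseq_key.split(key)[0].split(".")[-2]
    resolved = mapped_key.replace("*", layer_index)
    print(resolved)  # encoder.layers.3.attention.k_proj
```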
56
+
57
+
58
+ def set_recursively(hf_pointer, key, value, full_name, weight_type):
59
+ for attribute in key.split("."):
60
+ hf_pointer = getattr(hf_pointer, attribute)
61
+
62
+ if weight_type is not None:
63
+ hf_shape = getattr(hf_pointer, weight_type).shape
64
+ else:
65
+ hf_shape = hf_pointer.shape
66
+
67
+ if hf_shape != value.shape:
68
+ raise ValueError(
69
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
70
+ f" {value.shape} for {full_name}"
71
+ )
72
+
73
+ if weight_type == "weight":
74
+ hf_pointer.weight.data = value
75
+ elif weight_type == "weight_g":
76
+ hf_pointer.weight_g.data = value
77
+ elif weight_type == "weight_v":
78
+ hf_pointer.weight_v.data = value
79
+ elif weight_type == "bias":
80
+ hf_pointer.bias.data = value
81
+ else:
82
+ hf_pointer.data = value
83
+
84
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
85
+
86
+
87
+ def recursively_load_weights(fairseq_model, hf_model, is_headless):
88
+ unused_weights = []
89
+ fairseq_dict = fairseq_model.state_dict()
90
+
91
+ if not is_headless:
92
+ feature_extractor = hf_model.data2vec_audio.feature_extractor
93
+ pos_conv_embedding = hf_model.data2vec_audio.encoder.pos_conv_embed
94
+
95
+ else:
96
+ feature_extractor = hf_model.feature_extractor
97
+ pos_conv_embedding = hf_model.encoder.pos_conv_embed
98
+
99
+ for name, value in fairseq_dict.items():
100
+ is_used = False
101
+ if "conv_layers" in name:
102
+ load_conv_layer(
103
+ name,
104
+ value,
105
+ feature_extractor,
106
+ unused_weights,
107
+ )
108
+ is_used = True
109
+ elif "pos_conv" in name:
110
+ load_pos_conv_layer(
111
+ name,
112
+ value,
113
+ pos_conv_embedding,
114
+ unused_weights,
115
+ )
116
+ is_used = True
117
+ else:
118
+ for key, mapped_key in MAPPING.items():
119
+ if not is_headless:
120
+ mapped_key = "data2vec_audio." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
121
+ if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
122
+ is_used = True
123
+ if "*" in mapped_key:
124
+ layer_index = name.split(key)[0].split(".")[-2]
125
+ mapped_key = mapped_key.replace("*", layer_index)
126
+ if "weight_g" in name:
127
+ weight_type = "weight_g"
128
+ elif "weight_v" in name:
129
+ weight_type = "weight_v"
130
+ elif "bias" in name:
131
+ weight_type = "bias"
132
+ elif "weight" in name:
133
+ # TODO: don't match quantizer.weight_proj
134
+ weight_type = "weight"
135
+ else:
136
+ weight_type = None
137
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
138
+ continue
139
+ if not is_used:
140
+ unused_weights.append(name)
141
+
142
+ logger.warning(f"Unused weights: {unused_weights}")
143
+
144
+
145
+ def access_by_string(module, path):
146
+ names = path.split(".")
147
+ return reduce(getattr, names, module)
148
+
149
+
150
+ def set_weights(full_name, module, fsq_value, hf_weight_path):
151
+ hf_weight = access_by_string(module, hf_weight_path)
152
+ hf_value = hf_weight.data
153
+
154
+ if fsq_value.shape != hf_value.shape:
155
+ raise ValueError(f"{full_name} has size {fsq_value.shape}, but {hf_value.shape} was found.")
156
+ hf_weight.data = fsq_value
157
+ logger.info(f"{full_name} was correctly initialized from {hf_weight_path}.")
158
+
159
+
160
+ def load_conv_layer(full_name, value, feature_extractor, unused_weights):
161
+ name = full_name.split("conv_layers.")[-1]
162
+ items = name.split(".")
163
+ layer_id = int(items[0])
164
+ type_id = int(items[1])
165
+
166
+ weight_type = name.split(".")[-1]
167
+ if type_id == 0:
168
+ layer_type = "conv"
169
+ elif type_id == 2:
170
+ layer_type = "layer_norm"
171
+ else:
172
+ unused_weights.append(full_name)
173
+ return
174
+
175
+ set_weights(full_name, feature_extractor, value, f"conv_layers.{layer_id}.{layer_type}.{weight_type}")
176
+
177
+
178
+ def load_pos_conv_layer(full_name, value, pos_conv_embeddings, unused_weights):
179
+ name = full_name.split("pos_conv.")[-1]
180
+ items = name.split(".")
181
+ layer_id = int(items[0])
182
+ type_id = int(items[1])
183
+
184
+ weight_type = name.split(".")[-1]
185
+ if type_id != 0:
186
+ unused_weights.append(full_name)
187
+ return
188
+ else:
189
+ layer_type = "conv"
190
+
191
+ set_weights(full_name, pos_conv_embeddings, value, f"layers.{layer_id}.{layer_type}.{weight_type}")
192
+
193
+
194
+ @torch.no_grad()
195
+ def convert_wav2vec2_checkpoint(
196
+ checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
197
+ ):
198
+ """
199
+ Copy/paste/tweak model's weights to transformers design.
200
+ """
201
+ if config_path is not None:
202
+ config = Data2VecAudioConfig.from_pretrained(config_path)
203
+ else:
204
+ config = Data2VecAudioConfig()
205
+
206
+ if not is_finetuned:
207
+ # Modify final_proj layer name
208
+ hf_wav2vec = Data2VecAudioModel(config)
209
+ data2vec_checkpoint_dir = os.path.dirname(checkpoint_path)
210
+
211
+ state_dict = torch.load(checkpoint_path)
212
+ state_dict["model"]["final_proj.weight"] = state_dict["model"].pop("final_proj.0.weight")
213
+ state_dict["model"]["final_proj.bias"] = state_dict["model"].pop("final_proj.0.bias")
214
+ converted_ckpt = os.path.join(data2vec_checkpoint_dir, "converted.pt")
215
+ torch.save(state_dict, converted_ckpt)
216
+ else:
217
+ hf_wav2vec = Data2VecAudioForCTC(config)
218
+ converted_ckpt = checkpoint_path
219
+
220
+ def load_data2vec(path):
221
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([path])
222
+ return model[0].eval()
223
+
224
+ model = load_data2vec(converted_ckpt)
225
+
226
+ recursively_load_weights(model, hf_wav2vec, not is_finetuned)
227
+
228
+ processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-lv60")
229
+
230
+ ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
231
+ input_audio = [x["array"] for x in ds[:4]["audio"]]
232
+
233
+ inputs = processor(input_audio, return_tensors="pt", padding=True)
234
+
235
+ input_values = inputs.input_values
236
+ attention_mask = inputs.attention_mask
237
+ # input_values = inputs.input_values[:, :-1]
238
+ # attention_mask = inputs.attention_mask[:, :-1]
239
+
240
+ hf_wav2vec.eval()
241
+ model.eval()
242
+ if is_finetuned:
243
+ their_output = model(source=input_values, padding_mask=(1 - attention_mask), mask=False, features_only=True)[
244
+ "encoder_out"
245
+ ].transpose(0, 1)
246
+ our_output = hf_wav2vec(input_values, attention_mask=attention_mask)["logits"]
247
+
248
+ pred_ids = torch.argmax(our_output, dim=-1)
249
+ output_string = processor.batch_decode(pred_ids)
250
+
251
+ print(f"Expected Output: {ds[:4]['text']}, Pred: {output_string}")
252
+ else:
253
+ their_output = model(source=input_values, padding_mask=(1 - attention_mask), mask=False, features_only=True)[
254
+ "layer_results"
255
+ ][-1][0].transpose(0, 1)
256
+ our_output = hf_wav2vec(input_values, attention_mask=attention_mask)["last_hidden_state"]
257
+
258
+ print(our_output.shape, their_output.shape)
259
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
260
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
261
+ success = torch.allclose(our_output, their_output, atol=1e-3)
262
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
263
+ if not success:
264
+ raise Exception("Something went wRoNg")
265
+
266
+ hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
267
+
268
+ if is_finetuned:
269
+ processor.save_pretrained(pytorch_dump_folder_path)
270
+ else:
271
+ processor.feature_extractor.save_pretrained(pytorch_dump_folder_path)
272
+
273
+
274
+ if __name__ == "__main__":
275
+ parser = argparse.ArgumentParser()
276
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
277
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
278
+ parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
279
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
280
+ parser.add_argument(
281
+ "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
282
+ )
283
+ args = parser.parse_args()
284
+ convert_wav2vec2_checkpoint(
285
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
286
+ )
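For reference, a hypothetical invocation of this script, mirroring the flags defined above; all paths are placeholders.

```python
# Hypothetical usage (paths are placeholders), mirroring the argparse flags above:
# python ./convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./data2vec_audio_base_960h.pt \
#     --pytorch_dump_folder_path ./data2vec-audio-base-960h \
#     --config_path ./config.json
# Add --not_finetuned when converting a pretrained (not fine-tuned) checkpoint.
```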
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,208 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert data2vec checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+ import pathlib
21
+
22
+ import fairseq
23
+ import torch
24
+ from fairseq.modules import TransformerSentenceEncoderLayer
25
+ from packaging import version
26
+
27
+ from transformers import (
28
+ Data2VecTextConfig,
29
+ Data2VecTextForMaskedLM,
30
+ Data2VecTextForSequenceClassification,
31
+ Data2VecTextModel,
32
+ )
33
+ from transformers.models.bert.modeling_bert import (
34
+ BertIntermediate,
35
+ BertLayer,
36
+ BertOutput,
37
+ BertSelfAttention,
38
+ BertSelfOutput,
39
+ )
40
+
41
+ # IMPORTANT: In order for this script to run, please make sure to download the dictionary file `dict.txt` via `wget https://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz`
42
+ # File copied from https://github.com/pytorch/fairseq/blob/main/examples/data2vec/models/data2vec_text.py
43
+ from transformers.utils import logging
44
+
45
+
46
+ if version.parse(fairseq.__version__) < version.parse("0.9.0"):
47
+ raise Exception("requires fairseq >= 0.9.0")
48
+
49
+
50
+ logging.set_verbosity_info()
51
+ logger = logging.get_logger(__name__)
52
+
53
+ SAMPLE_TEXT = "Hello world! cécé herlolip"
54
+
55
+
56
+ def convert_data2vec_checkpoint_to_pytorch(
57
+ data2vec_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
58
+ ):
59
+ """
60
+ Copy/paste/tweak data2vec's weights to our BERT structure.
61
+ """
62
+ data2vec_checkpoint_dir, data2vec_checkpoint_file_name = os.path.split(data2vec_checkpoint_path)
63
+ data2vec = Data2VecTextModel.from_pretrained(
64
+ data2vec_checkpoint_dir, checkpoint_file=data2vec_checkpoint_file_name
65
+ )
66
+ data2vec.eval() # disable dropout
67
+ data2vec_model = data2vec.models[0]
68
+ data2vec_sent_encoder = data2vec_model.encoder.sentence_encoder
69
+ config = Data2VecTextConfig(
70
+ vocab_size=data2vec_sent_encoder.embed_tokens.num_embeddings,
71
+ hidden_size=data2vec_model.args.encoder_embed_dim,
72
+ num_hidden_layers=data2vec_model.args.encoder_layers,
73
+ num_attention_heads=data2vec_model.args.encoder_attention_heads,
74
+ intermediate_size=data2vec_model.args.encoder_ffn_embed_dim,
75
+ max_position_embeddings=514,
76
+ type_vocab_size=1,
77
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
78
+ )
79
+ if classification_head:
80
+ config.num_labels = data2vec.model.classification_heads["mnli"].out_proj.weight.shape[0]
81
+ print("Our BERT config:", config)
82
+
83
+ model = Data2VecTextForSequenceClassification(config) if classification_head else Data2VecTextForMaskedLM(config)
84
+ model.eval()
85
+
86
+ # Now let's copy all the weights.
87
+ # Embeddings
88
+ model.data2vec_text.embeddings.word_embeddings.weight = data2vec_sent_encoder.embed_tokens.weight
89
+ model.data2vec_text.embeddings.position_embeddings.weight = data2vec_sent_encoder.embed_positions.weight
90
+ model.data2vec_text.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
91
+ model.data2vec_text.embeddings.token_type_embeddings.weight
92
+ ) # just zero them out b/c data2vec doesn't use them.
93
+ model.data2vec_text.embeddings.LayerNorm.weight = data2vec_sent_encoder.layernorm_embedding.weight
94
+ model.data2vec_text.embeddings.LayerNorm.bias = data2vec_sent_encoder.layernorm_embedding.bias
95
+
96
+ for i in range(config.num_hidden_layers):
97
+ # Encoder: start of layer
98
+ layer: BertLayer = model.data2vec_text.encoder.layer[i]
99
+ data2vec_layer: TransformerSentenceEncoderLayer = data2vec_sent_encoder.layers[i]
100
+
101
+ # self attention
102
+ self_attn: BertSelfAttention = layer.attention.self
103
+ assert data2vec_layer.self_attn.k_proj.weight.data.shape == torch.Size(
104
+ (config.hidden_size, config.hidden_size)
105
+ ), (
106
+ "Shape for data2vec_layer.self_attn.k_proj.weight.data should be"
107
+ f" {torch.Size((config.hidden_size, config.hidden_size))}"
108
+ )
109
+ assert data2vec_layer.self_attn.q_proj.weight.data.shape == torch.Size(
110
+ (config.hidden_size, config.hidden_size)
111
+ ), (
112
+ "Shape for data2vec_layer.self_attn.q_proj.weight.data should be"
113
+ f" {torch.Size((config.hidden_size, config.hidden_size))}"
114
+ )
115
+ assert data2vec_layer.self_attn.v_proj.weight.data.shape == torch.Size(
116
+ (config.hidden_size, config.hidden_size)
117
+ ), (
118
+ "Shape for data2vec_layer.self_attn.v_proj.weight.data should be"
119
+ f" {torch.Size((config.hidden_size, config.hidden_size))}"
120
+ )
121
+
122
+ self_attn.query.weight.data = data2vec_layer.self_attn.q_proj.weight
123
+ self_attn.query.bias.data = data2vec_layer.self_attn.q_proj.bias
124
+ self_attn.key.weight.data = data2vec_layer.self_attn.k_proj.weight
125
+ self_attn.key.bias.data = data2vec_layer.self_attn.k_proj.bias
126
+ self_attn.value.weight.data = data2vec_layer.self_attn.v_proj.weight
127
+ self_attn.value.bias.data = data2vec_layer.self_attn.v_proj.bias
128
+
129
+ # self-attention output
130
+ self_output: BertSelfOutput = layer.attention.output
131
+ assert (
132
+ self_output.dense.weight.shape == data2vec_layer.self_attn.out_proj.weight.shape
133
+ ), f"Shape for self_output.dense.weight should be {data2vec_layer.self_attn.out_proj.weight.shape}"
134
+ self_output.dense.weight = data2vec_layer.self_attn.out_proj.weight
135
+ self_output.dense.bias = data2vec_layer.self_attn.out_proj.bias
136
+ self_output.LayerNorm.weight = data2vec_layer.self_attn_layer_norm.weight
137
+ self_output.LayerNorm.bias = data2vec_layer.self_attn_layer_norm.bias
138
+
139
+ # intermediate
140
+ intermediate: BertIntermediate = layer.intermediate
141
+ assert (
142
+ intermediate.dense.weight.shape == data2vec_layer.fc1.weight.shape
143
+ ), f"Shape for intermediate.dense.weight should be {data2vec_layer.fc1.weight.shape}"
144
+ intermediate.dense.weight = data2vec_layer.fc1.weight
145
+ intermediate.dense.bias = data2vec_layer.fc1.bias
146
+
147
+ # output
148
+ bert_output: BertOutput = layer.output
149
+ assert (
150
+ bert_output.dense.weight.shape == data2vec_layer.fc2.weight.shape
151
+ ), f"Shape for bert_output.dense.weight should be {data2vec_layer.fc2.weight.shape}"
152
+ bert_output.dense.weight = data2vec_layer.fc2.weight
153
+ bert_output.dense.bias = data2vec_layer.fc2.bias
154
+ bert_output.LayerNorm.weight = data2vec_layer.final_layer_norm.weight
155
+ bert_output.LayerNorm.bias = data2vec_layer.final_layer_norm.bias
156
+ # end of layer
157
+
158
+ if classification_head:
159
+ model.classifier.dense.weight = data2vec.model.classification_heads["mnli"].dense.weight
160
+ model.classifier.dense.bias = data2vec.model.classification_heads["mnli"].dense.bias
161
+ model.classifier.out_proj.weight = data2vec.model.classification_heads["mnli"].out_proj.weight
162
+ model.classifier.out_proj.bias = data2vec.model.classification_heads["mnli"].out_proj.bias
163
+ else:
164
+ # LM Head
165
+ model.lm_head.dense.weight = data2vec_model.encoder.lm_head.dense.weight
166
+ model.lm_head.dense.bias = data2vec_model.encoder.lm_head.dense.bias
167
+ model.lm_head.layer_norm.weight = data2vec_model.encoder.lm_head.layer_norm.weight
168
+ model.lm_head.layer_norm.bias = data2vec_model.encoder.lm_head.layer_norm.bias
169
+ model.lm_head.decoder.weight = data2vec_model.encoder.lm_head.weight
170
+ model.lm_head.decoder.bias = data2vec_model.encoder.lm_head.bias
171
+
172
+ # Let's check that we get the same results.
173
+ input_ids: torch.Tensor = data2vec.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
174
+
175
+ our_output = model(input_ids)[0]
176
+ if classification_head:
177
+ their_output = data2vec.model.classification_heads["mnli"](data2vec.extract_features(input_ids))
178
+ else:
179
+ their_output = data2vec_model(input_ids)[0]
180
+ print(our_output.shape, their_output.shape)
181
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
182
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
183
+ success = torch.allclose(our_output, their_output, atol=1e-3)
184
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
185
+ if not success:
186
+ raise Exception("Something went wRoNg")
187
+
188
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
189
+ print(f"Saving model to {pytorch_dump_folder_path}")
190
+ model.save_pretrained(pytorch_dump_folder_path)
191
+
192
+
193
+ if __name__ == "__main__":
194
+ parser = argparse.ArgumentParser()
195
+ # Required parameters
196
+ parser.add_argument(
197
+ "--checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
198
+ )
199
+ parser.add_argument(
200
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
201
+ )
202
+ parser.add_argument(
203
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
204
+ )
205
+ args = parser.parse_args()
206
+ convert_data2vec_checkpoint_to_pytorch(
207
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
208
+ )
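As with the audio script, a hypothetical invocation mirroring the flags defined above; paths are placeholders.

```python
# Hypothetical usage (paths are placeholders), mirroring the argparse flags above:
# python ./convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./data2vec_text_base/model.pt \
#     --pytorch_dump_folder_path ./data2vec-text-base
# Add --classification_head to also convert the "mnli" classification head.
```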
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,374 @@
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import json
4
+
5
+ import torch
6
+ from huggingface_hub import hf_hub_download
7
+ from PIL import Image
8
+ from timm.models import create_model
9
+
10
+ from transformers import (
11
+ BeitImageProcessor,
12
+ Data2VecVisionConfig,
13
+ Data2VecVisionForImageClassification,
14
+ Data2VecVisionModel,
15
+ )
16
+
17
+
18
+ def create_rename_keys(config, has_lm_head=False, is_semantic=False, hf_prefix="data2vec."):
19
+ prefix = "backbone." if is_semantic else ""
20
+
21
+ rename_keys = []
22
+ for i in range(config.num_hidden_layers):
23
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
24
+ rename_keys.append(
25
+ (f"{prefix}blocks.{i}.norm1.weight", f"{hf_prefix}encoder.layer.{i}.layernorm_before.weight")
26
+ )
27
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"{hf_prefix}encoder.layer.{i}.layernorm_before.bias"))
28
+ rename_keys.append(
29
+ (f"{prefix}blocks.{i}.attn.proj.weight", f"{hf_prefix}encoder.layer.{i}.attention.output.dense.weight")
30
+ )
31
+ rename_keys.append(
32
+ (f"{prefix}blocks.{i}.attn.proj.bias", f"{hf_prefix}encoder.layer.{i}.attention.output.dense.bias")
33
+ )
34
+ rename_keys.append(
35
+ (f"{prefix}blocks.{i}.norm2.weight", f"{hf_prefix}encoder.layer.{i}.layernorm_after.weight")
36
+ )
37
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"{hf_prefix}encoder.layer.{i}.layernorm_after.bias"))
38
+ rename_keys.append(
39
+ (f"{prefix}blocks.{i}.mlp.fc1.weight", f"{hf_prefix}encoder.layer.{i}.intermediate.dense.weight")
40
+ )
41
+ rename_keys.append(
42
+ (f"{prefix}blocks.{i}.mlp.fc1.bias", f"{hf_prefix}encoder.layer.{i}.intermediate.dense.bias")
43
+ )
44
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"{hf_prefix}encoder.layer.{i}.output.dense.weight"))
45
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"{hf_prefix}encoder.layer.{i}.output.dense.bias"))
46
+
47
+ # projection layer + position embeddings
48
+ rename_keys.extend(
49
+ [
50
+ (f"{prefix}cls_token", f"{hf_prefix}embeddings.cls_token"),
51
+ (f"{prefix}patch_embed.proj.weight", f"{hf_prefix}embeddings.patch_embeddings.projection.weight"),
52
+ (f"{prefix}patch_embed.proj.bias", f"{hf_prefix}embeddings.patch_embeddings.projection.bias"),
53
+ ]
54
+ )
55
+
56
+ if has_lm_head:
57
+ # mask token + shared relative position bias + layernorm
58
+ rename_keys.extend(
59
+ [
60
+ ("mask_token", f"{hf_prefix}embeddings.mask_token"),
61
+ (
62
+ "rel_pos_bias.relative_position_bias_table",
63
+ f"{hf_prefix}encoder.relative_position_bias.relative_position_bias_table",
64
+ ),
65
+ (
66
+ "rel_pos_bias.relative_position_index",
67
+ f"{hf_prefix}encoder.relative_position_bias.relative_position_index",
68
+ ),
69
+ ("norm.weight", "layernorm.weight"),
70
+ ("norm.bias", "layernorm.bias"),
71
+ ]
72
+ )
73
+ elif is_semantic:
74
+ # semantic segmentation classification heads
75
+ rename_keys.extend(
76
+ [
77
+ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
78
+ ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
79
+ ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
80
+ ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
81
+ ]
82
+ )
83
+ else:
84
+ # layernorm + classification head
85
+ rename_keys.extend(
86
+ [
87
+ ("fc_norm.weight", f"{hf_prefix}pooler.layernorm.weight"),
88
+ ("fc_norm.bias", f"{hf_prefix}pooler.layernorm.bias"),
89
+ ("head.weight", "classifier.weight"),
90
+ ("head.bias", "classifier.bias"),
91
+ ]
92
+ )
93
+
94
+ return rename_keys
95
+
96
+
97
+ def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False, hf_prefix="data2vec_vision."):
98
+ for i in range(config.num_hidden_layers):
99
+ prefix = "backbone." if is_semantic else ""
100
+ # queries, keys and values
101
+ in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
102
+ q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
103
+ v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
104
+
105
+ state_dict[f"{hf_prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
106
+ : config.hidden_size, :
107
+ ]
108
+ state_dict[f"{hf_prefix}encoder.layer.{i}.attention.attention.query.bias"] = q_bias
109
+ state_dict[f"{hf_prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
110
+ config.hidden_size : config.hidden_size * 2, :
111
+ ]
112
+ state_dict[f"{hf_prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
113
+ -config.hidden_size :, :
114
+ ]
115
+ state_dict[f"{hf_prefix}encoder.layer.{i}.attention.attention.value.bias"] = v_bias
116
+
117
+ # gamma_1 and gamma_2
118
+ # we call them lambda because otherwise they are renamed when using .from_pretrained
119
+ gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
120
+ gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
121
+
122
+ state_dict[f"{hf_prefix}encoder.layer.{i}.lambda_1"] = gamma_1
123
+ state_dict[f"{hf_prefix}encoder.layer.{i}.lambda_2"] = gamma_2
124
+
125
+ # relative_position bias table + index
126
+ if not has_lm_head:
127
+ # each layer has its own relative position bias
128
+ table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
129
+ index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")
130
+
131
+ state_dict[
132
+ f"{hf_prefix}encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
133
+ ] = table
134
+ state_dict[
135
+ f"{hf_prefix}encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
136
+ ] = index
137
+
138
+
139
+ def get_args():
140
+ parser = argparse.ArgumentParser(
141
+ "Convert Data2VecVision to HF for image classification and pretraining", add_help=False
142
+ )
143
+ parser.add_argument("--hf_checkpoint_name", type=str)
144
+ parser.add_argument("--input_size", default=224, type=int, help="images input size")
145
+ parser.add_argument("--beit_checkpoint", default="", help="beit checkpoint")
146
+
147
+ return parser.parse_args()
148
+
149
+
150
+ def load_beit_model(args, is_finetuned, is_large):
151
+ def load_state_dict(model, state_dict, prefix="", ignore_missing="relative_position_index"):
152
+ missing_keys = []
153
+ unexpected_keys = []
154
+ error_msgs = []
155
+ # copy state_dict so _load_from_state_dict can modify it
156
+ metadata = getattr(state_dict, "_metadata", None)
157
+ state_dict = state_dict.copy()
158
+ if metadata is not None:
159
+ state_dict._metadata = metadata
160
+
161
+ def load(module, prefix=""):
162
+ local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
163
+ module._load_from_state_dict(
164
+ state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
165
+ )
166
+ for name, child in module._modules.items():
167
+ if child is not None:
168
+ load(child, prefix + name + ".")
169
+
170
+ load(model, prefix=prefix)
171
+
172
+ warn_missing_keys = []
173
+ ignore_missing_keys = []
174
+ for key in missing_keys:
175
+ keep_flag = True
176
+ for ignore_key in ignore_missing.split("|"):
177
+ if ignore_key in key:
178
+ keep_flag = False
179
+ break
180
+ if keep_flag:
181
+ warn_missing_keys.append(key)
182
+ else:
183
+ ignore_missing_keys.append(key)
184
+
185
+ missing_keys = warn_missing_keys
186
+
187
+ if len(missing_keys) > 0:
188
+ print(
189
+ "Weights of {} not initialized from pretrained model: {}".format(
190
+ model.__class__.__name__, missing_keys
191
+ )
192
+ )
193
+ if len(unexpected_keys) > 0:
194
+ print("Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys))
195
+ if len(ignore_missing_keys) > 0:
196
+ print(
197
+ "Ignored weights of {} not initialized from pretrained model: {}".format(
198
+ model.__class__.__name__, ignore_missing_keys
199
+ )
200
+ )
201
+ if len(error_msgs) > 0:
202
+ print("\n".join(error_msgs))
203
+
204
+ model_kwargs = {
205
+ "pretrained": False,
206
+ "use_shared_rel_pos_bias": True,
207
+ "use_abs_pos_emb": False,
208
+ "init_values": 0.1,
209
+ }
210
+
211
+ if is_finetuned:
212
+ model_kwargs.update(
213
+ {
214
+ "num_classes": 1000,
215
+ "use_mean_pooling": True,
216
+ "init_scale": 0.001,
217
+ "use_rel_pos_bias": True,
218
+ }
219
+ )
220
+
221
+ model = create_model(
222
+ "beit_large_patch16_224" if is_large else "beit_base_patch16_224",
223
+ **model_kwargs,
224
+ )
225
+ patch_size = model.patch_embed.patch_size
226
+ args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
227
+ checkpoint = torch.load(args.beit_checkpoint, map_location="cpu")
228
+
229
+ print(f"Load ckpt from {args.beit_checkpoint}")
230
+ checkpoint_model = None
231
+ for model_key in ("model", "module"):
232
+ if model_key in checkpoint:
233
+ checkpoint_model = checkpoint[model_key]
234
+ print(f"Load state_dict by model_key = {model_key}")
235
+ break
236
+
237
+ all_keys = list(checkpoint_model.keys())
238
+ for key in all_keys:
239
+ if "relative_position_index" in key:
240
+ checkpoint_model.pop(key)
241
+
242
+ if "relative_position_bias_table" in key:
243
+ rel_pos_bias = checkpoint_model[key]
244
+ src_num_pos, num_attn_heads = rel_pos_bias.size()
245
+ dst_num_pos, _ = model.state_dict()[key].size()
246
+ dst_patch_shape = model.patch_embed.patch_shape
247
+ if dst_patch_shape[0] != dst_patch_shape[1]:
248
+ raise NotImplementedError()
249
+
250
+ load_state_dict(model, checkpoint_model, prefix="")
251
+
252
+ return model
253
+
254
+
255
+ def main():
256
+ args = get_args()
257
+
258
+ is_finetuned = "ft1k" in args.hf_checkpoint_name
259
+ is_large = "large" in args.hf_checkpoint_name
260
+
261
+ if is_finetuned:
262
+ # To convert Beit's data2vec_vision to HF you need to copy
263
+ # https://github.com/facebookresearch/data2vec_vision/blob/main/beit/modeling_finetune.py
264
+ # into this folder.
265
+ import modeling_finetune # noqa: F401
266
+ else:
267
+ # To convert Beit's data2vec_vision to HF you need to copy
268
+ # https://github.com/facebookresearch/data2vec_vision/blob/main/beit/modeling_cyclical.py
269
+ # into this folder
270
+ # IMPORTANT: Note that for now we've only converted the down-stream
271
+ # model and not the full pretrained model. This means for the integration
272
+ # test you need to add a `return x` after the following line:
273
+ # https://github.com/facebookresearch/data2vec_vision/blob/af9a36349aaed59ae66e69b5dabeef2d62fdc5da/beit/modeling_cyclical.py#L197
274
+ # to make the integration test pass.
275
+ import modeling_cyclical # noqa: F401
276
+
277
+ # 1. Create model config
278
+ config = Data2VecVisionConfig()
279
+ if is_finetuned:
280
+ config.use_relative_position_bias = True
281
+ config.use_shared_relative_position_bias = False
282
+ config.use_mean_pooling = True
283
+ config.num_labels = 1000
284
+
285
+ repo_id = "huggingface/label-files"
286
+ filename = "imagenet-1k-id2label.json"
287
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
288
+ id2label = {int(k): v for k, v in id2label.items()}
289
+ config.id2label = id2label
290
+ config.label2id = {v: k for k, v in id2label.items()}
291
+ else:
292
+ config.use_relative_position_bias = False
293
+ config.use_shared_relative_position_bias = True
294
+ config.use_mean_pooling = False
295
+
296
+ if is_large:
297
+ config.hidden_size = 1024
298
+ config.intermediate_size = 4096
299
+ config.num_hidden_layers = 24
300
+ config.num_attention_heads = 16
301
+
302
+ # 2. Load Beit model
303
+ orig_model = load_beit_model(args, is_finetuned, is_large)
304
+ orig_model.eval()
305
+
306
+ # 3. Forward Beit model
307
+ image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
308
+ image = Image.open("../../../../tests/fixtures/tests_samples/COCO/000000039769.png")
309
+ encoding = image_processor(images=image, return_tensors="pt")
310
+ pixel_values = encoding["pixel_values"]
311
+
312
+ orig_args = (pixel_values,) if is_finetuned else (pixel_values, None)
313
+ with torch.no_grad():
314
+ orig_model_output = orig_model(*orig_args)
315
+
316
+ # 4. Load HF Data2VecVision model
317
+ if is_finetuned:
318
+ hf_model = Data2VecVisionForImageClassification(config)
319
+ hf_model.eval()
320
+ has_lm_head = False
321
+ hf_prefix = "data2vec_vision."
322
+ else:
323
+ hf_model = Data2VecVisionModel(config)
324
+ hf_model.eval()
325
+ has_lm_head = True
326
+ hf_prefix = ""
327
+
328
+ rename_keys = create_rename_keys(config, hf_prefix=hf_prefix, has_lm_head=has_lm_head)
329
+ state_dict = orig_model.state_dict()
330
+ for src, dest in rename_keys:
331
+ val = state_dict.pop(src)
332
+ state_dict[dest] = val
333
+
334
+ read_in_q_k_v(state_dict, config, hf_prefix=hf_prefix, has_lm_head=has_lm_head)
335
+ missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
336
+ print("HF missing", missing_keys)
337
+ print("HF unexpected_keys", unexpected_keys)
338
+
339
+ # 5. Forward HF Data2VecVision model
340
+ with torch.no_grad():
341
+ hf_model_output = hf_model(pixel_values)
342
+
343
+ hf_output = hf_model_output.logits if is_finetuned else hf_model_output.last_hidden_state
344
+
345
+ # 6. Compare
346
+ max_absolute_diff = torch.max(torch.abs(hf_output - orig_model_output)).item()
347
+
348
+ print(f"max_absolute_diff = {max_absolute_diff}")
349
+ success = torch.allclose(hf_output, orig_model_output, atol=1e-3)
350
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
351
+ if not success:
352
+ raise Exception("Something went wRoNg")
353
+
354
+ # 7. Save
355
+ print(f"Saving to {args.hf_checkpoint_name}")
356
+ hf_model.save_pretrained(args.hf_checkpoint_name)
357
+ image_processor.save_pretrained(args.hf_checkpoint_name)
358
+
359
+
360
+ if __name__ == "__main__":
361
+ main()
362
+ # Run the following to convert checkpoints
363
+ # python ./convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py \
364
+ # --beit_checkpoint ./pretrained_base.pt \
365
+ # --hf_checkpoint_name "./data2vec-vision-base"
366
+ # python ./convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py \
367
+ # --beit_checkpoint ./finetuned_base.pt \
368
+ # --hf_checkpoint_name "./data2vec-vision-base-ft1k"
369
+ # python ./convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py \
370
+ # --beit_checkpoint ./pretrained_large.pt \
371
+ # --hf_checkpoint_name "./data2vec-vision-large"
372
+ # python ./convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py \
373
+ # --beit_checkpoint ./finetuned_large.pt \
374
+ # --hf_checkpoint_name "./data2vec-vision-large-ft1k"
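Once a checkpoint has been converted and saved by one of the commands above, it can be loaded back for inference. A minimal sketch (not part of the diff), assuming the fine-tuned base checkpoint was saved to `./data2vec-vision-base-ft1k`; the image path is a placeholder.

```python
# Minimal sketch: load a converted, fine-tuned checkpoint saved by the commands above.
import torch
from PIL import Image

from transformers import BeitImageProcessor, Data2VecVisionForImageClassification

image_processor = BeitImageProcessor.from_pretrained("./data2vec-vision-base-ft1k")
model = Data2VecVisionForImageClassification.from_pretrained("./data2vec-vision-base-ft1k")
model.eval()

image = Image.open("example.jpg")  # placeholder image path
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```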
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_audio.py ADDED
@@ -0,0 +1,1514 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Data2VecAudio model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ CausalLMOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ Wav2Vec2BaseModelOutput,
35
+ XVectorOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...utils import (
39
+ add_code_sample_docstrings,
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ is_peft_available,
43
+ logging,
44
+ )
45
+ from .configuration_data2vec_audio import Data2VecAudioConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ _HIDDEN_STATES_START_POSITION = 2
52
+
53
+ # General docstring
54
+ _CONFIG_FOR_DOC = "Data2VecAudioConfig"
55
+
56
+ # Base docstring
57
+ _CHECKPOINT_FOR_DOC = "facebook/data2vec-audio-base-960h"
58
+ _EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
59
+
60
+ # CTC docstring
61
+ _CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
62
+ _CTC_EXPECTED_LOSS = 66.95
63
+
64
+
65
+ from ..deprecated._archive_maps import DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
66
+
67
+
68
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
69
+ def _compute_mask_indices(
70
+ shape: Tuple[int, int],
71
+ mask_prob: float,
72
+ mask_length: int,
73
+ attention_mask: Optional[torch.LongTensor] = None,
74
+ min_masks: int = 0,
75
+ ) -> np.ndarray:
76
+ """
77
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
78
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
79
+ CPU as part of the preprocessing during training.
80
+
81
+ Args:
82
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
83
+ the first element is the batch size and the second element is the length of the axis to span.
84
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
85
+ independently generated mask spans of length `mask_length` is computed by
86
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
87
+ actual percentage will be smaller.
88
+ mask_length: size of the mask
89
+ min_masks: minimum number of masked spans
90
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
91
+ each batch dimension.
92
+ """
93
+ batch_size, sequence_length = shape
94
+
95
+ if mask_length < 1:
96
+ raise ValueError("`mask_length` has to be bigger than 0.")
97
+
98
+ if mask_length > sequence_length:
99
+ raise ValueError(
100
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
101
+ f" and `sequence_length`: {sequence_length}`"
102
+ )
103
+
104
+ # epsilon is used for probabilistic rounding
105
+ epsilon = np.random.rand(1).item()
106
+
107
+ def compute_num_masked_span(input_length):
108
+ """Given input length, compute how many spans should be masked"""
109
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
110
+ num_masked_span = max(num_masked_span, min_masks)
111
+
112
+ # make sure num masked span <= sequence_length
113
+ if num_masked_span * mask_length > sequence_length:
114
+ num_masked_span = sequence_length // mask_length
115
+
116
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
117
+ if input_length - (mask_length - 1) < num_masked_span:
118
+ num_masked_span = max(input_length - (mask_length - 1), 0)
119
+
120
+ return num_masked_span
121
+
122
+ # compute number of masked spans in batch
123
+ input_lengths = (
124
+ attention_mask.sum(-1).detach().tolist()
125
+ if attention_mask is not None
126
+ else [sequence_length for _ in range(batch_size)]
127
+ )
128
+
129
+ # SpecAugment mask to fill
130
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
131
+ spec_aug_mask_idxs = []
132
+
133
+ max_num_masked_span = compute_num_masked_span(sequence_length)
134
+
135
+ if max_num_masked_span == 0:
136
+ return spec_aug_mask
137
+
138
+ for input_length in input_lengths:
139
+ # compute num of masked spans for this input
140
+ num_masked_span = compute_num_masked_span(input_length)
141
+
142
+ # get random indices to mask
143
+ spec_aug_mask_idx = np.random.choice(
144
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
145
+ )
146
+
147
+ # pick first sampled index that will serve as a dummy index to pad vector
148
+ # to ensure same dimension for all batches due to probabilistic rounding
149
+ # Picking first sample just pads those vectors twice.
150
+ if len(spec_aug_mask_idx) == 0:
151
+ # this case can only happen if `input_length` is strictly smaller than
152
+ # `sequence_length` in which case the last token has to be a padding
153
+ # token which we can use as a dummy mask id
154
+ dummy_mask_idx = sequence_length - 1
155
+ else:
156
+ dummy_mask_idx = spec_aug_mask_idx[0]
157
+
158
+ spec_aug_mask_idx = np.concatenate(
159
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
160
+ )
161
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
162
+
163
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
164
+
165
+ # expand masked indices to masked spans
166
+ spec_aug_mask_idxs = np.broadcast_to(
167
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
168
+ )
169
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
170
+
171
+ # add offset to the starting indexes so that indexes now create a span
172
+ offsets = np.arange(mask_length)[None, None, :]
173
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
174
+ batch_size, max_num_masked_span * mask_length
175
+ )
176
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
177
+
178
+ # ensure that we cannot have indices larger than sequence_length
179
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
180
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
181
+
182
+ # scatter indices to mask
183
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
184
+
185
+ return spec_aug_mask
186
+
187
+
188
+ class Data2VecAudioConvLayer(nn.Module):
189
+ def __init__(self, config, layer_id=0):
190
+ super().__init__()
191
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
192
+ self.out_conv_dim = config.conv_dim[layer_id]
193
+
194
+ self.conv = nn.Conv1d(
195
+ self.in_conv_dim,
196
+ self.out_conv_dim,
197
+ kernel_size=config.conv_kernel[layer_id],
198
+ stride=config.conv_stride[layer_id],
199
+ bias=config.conv_bias,
200
+ )
201
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
202
+ self.activation = ACT2FN[config.feat_extract_activation]
203
+
204
+ def forward(self, hidden_states):
205
+ hidden_states = self.conv(hidden_states)
206
+
207
+ hidden_states = hidden_states.transpose(-2, -1)
208
+ hidden_states = self.layer_norm(hidden_states)
209
+ hidden_states = hidden_states.transpose(-2, -1)
210
+
211
+ hidden_states = self.activation(hidden_states)
212
+ return hidden_states
213
+
214
+
215
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Data2VecAudio
216
+ class Data2VecAudioPadLayer(nn.Module):
217
+ def __init__(self, num_conv_pos_embeddings):
218
+ super().__init__()
219
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
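+ # with "same"-style padding of kernel_size // 2, an even kernel produces one extra output
+ # frame, so a single trailing frame is trimmed to restore the original sequence length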
220
+
221
+ def forward(self, hidden_states):
222
+ if self.num_pad_remove > 0:
223
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
224
+ return hidden_states
225
+
226
+
227
+ class Data2VecAudioPositionalConvLayer(nn.Module):
228
+ def __init__(self, config):
229
+ super().__init__()
230
+ self.conv = nn.Conv1d(
231
+ config.hidden_size,
232
+ config.hidden_size,
233
+ kernel_size=config.conv_pos_kernel_size,
234
+ padding=config.conv_pos_kernel_size // 2,
235
+ groups=config.num_conv_pos_embedding_groups,
236
+ )
237
+
238
+ self.padding = Data2VecAudioPadLayer(config.conv_pos_kernel_size)
239
+ self.activation = ACT2FN[config.feat_extract_activation]
240
+ # no learnable parameters
241
+ self.layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False)
242
+
243
+ def forward(self, hidden_states):
244
+ hidden_states = self.conv(hidden_states)
245
+ hidden_states = self.padding(hidden_states)
246
+
247
+ hidden_states = hidden_states.transpose(1, 2)
248
+ hidden_states = self.layer_norm(hidden_states)
249
+ hidden_states = hidden_states.transpose(1, 2)
250
+ hidden_states = self.activation(hidden_states)
251
+ return hidden_states
252
+
253
+
254
+ class Data2VecAudioPositionalConvEmbedding(nn.Module):
255
+ def __init__(self, config):
256
+ super().__init__()
257
+ self.layers = nn.ModuleList(
258
+ [Data2VecAudioPositionalConvLayer(config) for _ in range(config.num_conv_pos_embeddings)]
259
+ )
260
+
261
+ def forward(self, hidden_states):
262
+ hidden_states = hidden_states.transpose(1, 2)
263
+ for layer in self.layers:
264
+ hidden_states = layer(hidden_states)
265
+ hidden_states = hidden_states.transpose(1, 2)
266
+ return hidden_states
267
+
268
+
269
+ class Data2VecAudioFeatureEncoder(nn.Module):
270
+ """Construct the features from raw audio waveform"""
271
+
272
+ def __init__(self, config):
273
+ super().__init__()
274
+ self.conv_layers = nn.ModuleList(
275
+ [Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
276
+ )
277
+ self.gradient_checkpointing = False
278
+ self._requires_grad = True
279
+
280
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder._freeze_parameters
281
+ def _freeze_parameters(self):
282
+ for param in self.parameters():
283
+ param.requires_grad = False
284
+ self._requires_grad = False
285
+
286
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder.forward
287
+ def forward(self, input_values):
288
+ hidden_states = input_values[:, None]
289
+
290
+ # make sure hidden_states require grad for gradient_checkpointing
291
+ if self._requires_grad and self.training:
292
+ hidden_states.requires_grad = True
293
+
294
+ for conv_layer in self.conv_layers:
295
+ if self._requires_grad and self.gradient_checkpointing and self.training:
296
+ hidden_states = self._gradient_checkpointing_func(
297
+ conv_layer.__call__,
298
+ hidden_states,
299
+ )
300
+ else:
301
+ hidden_states = conv_layer(hidden_states)
302
+
303
+ return hidden_states
304
+
305
+
306
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->Data2VecAudio
307
+ class Data2VecAudioFeatureProjection(nn.Module):
308
+ def __init__(self, config):
309
+ super().__init__()
310
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
311
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
312
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
313
+
314
+ def forward(self, hidden_states):
315
+ # non-projected hidden states are needed for quantization
316
+ norm_hidden_states = self.layer_norm(hidden_states)
317
+ hidden_states = self.projection(norm_hidden_states)
318
+ hidden_states = self.dropout(hidden_states)
319
+ return hidden_states, norm_hidden_states
320
+
321
+
322
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Data2VecAudio
323
+ class Data2VecAudioAttention(nn.Module):
324
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
325
+
326
+ def __init__(
327
+ self,
328
+ embed_dim: int,
329
+ num_heads: int,
330
+ dropout: float = 0.0,
331
+ is_decoder: bool = False,
332
+ bias: bool = True,
333
+ is_causal: bool = False,
334
+ config: Optional[Data2VecAudioConfig] = None,
335
+ ):
336
+ super().__init__()
337
+ self.embed_dim = embed_dim
338
+ self.num_heads = num_heads
339
+ self.dropout = dropout
340
+ self.head_dim = embed_dim // num_heads
341
+ self.config = config
342
+
343
+ if (self.head_dim * num_heads) != self.embed_dim:
344
+ raise ValueError(
345
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
346
+ f" and `num_heads`: {num_heads})."
347
+ )
348
+ self.scaling = self.head_dim**-0.5
349
+ self.is_decoder = is_decoder
350
+ self.is_causal = is_causal
351
+
352
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
353
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
354
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
355
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
356
+
357
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
358
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
359
+
360
+ def forward(
361
+ self,
362
+ hidden_states: torch.Tensor,
363
+ key_value_states: Optional[torch.Tensor] = None,
364
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
365
+ attention_mask: Optional[torch.Tensor] = None,
366
+ layer_head_mask: Optional[torch.Tensor] = None,
367
+ output_attentions: bool = False,
368
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
369
+ """Input shape: Batch x Time x Channel"""
370
+
371
+ # if key_value_states are provided this layer is used as a cross-attention layer
372
+ # for the decoder
373
+ is_cross_attention = key_value_states is not None
374
+
375
+ bsz, tgt_len, _ = hidden_states.size()
376
+
377
+ # get query proj
378
+ query_states = self.q_proj(hidden_states) * self.scaling
379
+ # get key, value proj
380
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
381
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
382
+ # the provided `key_value_states` to support prefix tuning
383
+ if (
384
+ is_cross_attention
385
+ and past_key_value is not None
386
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
387
+ ):
388
+ # reuse k,v, cross_attentions
389
+ key_states = past_key_value[0]
390
+ value_states = past_key_value[1]
391
+ elif is_cross_attention:
392
+ # cross_attentions
393
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
394
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
395
+ elif past_key_value is not None:
396
+ # reuse k, v, self_attention
397
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
398
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
399
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
400
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
401
+ else:
402
+ # self_attention
403
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
404
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
405
+
406
+ if self.is_decoder:
407
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
408
+ # Further calls to cross_attention layer can then reuse all cross-attention
409
+ # key/value_states (first "if" case)
410
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
411
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
412
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
413
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
414
+ past_key_value = (key_states, value_states)
415
+
416
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
417
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
418
+ key_states = key_states.reshape(*proj_shape)
419
+ value_states = value_states.reshape(*proj_shape)
420
+
421
+ src_len = key_states.size(1)
422
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
423
+
424
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
425
+ raise ValueError(
426
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
427
+ f" {attn_weights.size()}"
428
+ )
429
+
430
+ if attention_mask is not None:
431
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
432
+ raise ValueError(
433
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
434
+ )
435
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
436
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
437
+
438
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
439
+
440
+ if layer_head_mask is not None:
441
+ if layer_head_mask.size() != (self.num_heads,):
442
+ raise ValueError(
443
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
444
+ f" {layer_head_mask.size()}"
445
+ )
446
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
447
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
448
+
449
+ if output_attentions:
450
+ # this operation is a bit awkward, but it's required to
451
+ # make sure that attn_weights keeps its gradient.
452
+ # In order to do so, attn_weights have to be reshaped
453
+ # twice and have to be reused in the following
454
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
455
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
456
+ else:
457
+ attn_weights_reshaped = None
458
+
459
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
460
+
461
+ attn_output = torch.bmm(attn_probs, value_states)
462
+
463
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
464
+ raise ValueError(
465
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
466
+ f" {attn_output.size()}"
467
+ )
468
+
469
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
470
+ attn_output = attn_output.transpose(1, 2)
471
+
472
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
473
+ # partitioned across GPUs when using tensor-parallelism.
474
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
475
+
476
+ attn_output = self.out_proj(attn_output)
477
+
478
+ return attn_output, attn_weights_reshaped, past_key_value
479
+
480
+
481
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Data2VecAudio
482
+ class Data2VecAudioFeedForward(nn.Module):
483
+ def __init__(self, config):
484
+ super().__init__()
485
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
486
+
487
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
488
+ if isinstance(config.hidden_act, str):
489
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
490
+ else:
491
+ self.intermediate_act_fn = config.hidden_act
492
+
493
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
494
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
495
+
496
+ def forward(self, hidden_states):
497
+ hidden_states = self.intermediate_dense(hidden_states)
498
+ hidden_states = self.intermediate_act_fn(hidden_states)
499
+ hidden_states = self.intermediate_dropout(hidden_states)
500
+
501
+ hidden_states = self.output_dense(hidden_states)
502
+ hidden_states = self.output_dropout(hidden_states)
503
+ return hidden_states
504
+
505
+
506
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->Data2VecAudio
507
+ class Data2VecAudioEncoderLayer(nn.Module):
508
+ def __init__(self, config):
509
+ super().__init__()
510
+ self.attention = Data2VecAudioAttention(
511
+ embed_dim=config.hidden_size,
512
+ num_heads=config.num_attention_heads,
513
+ dropout=config.attention_dropout,
514
+ is_decoder=False,
515
+ )
516
+ self.dropout = nn.Dropout(config.hidden_dropout)
517
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
518
+ self.feed_forward = Data2VecAudioFeedForward(config)
519
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
520
+
521
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
522
+ attn_residual = hidden_states
523
+ hidden_states, attn_weights, _ = self.attention(
524
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
525
+ )
526
+ hidden_states = self.dropout(hidden_states)
527
+ hidden_states = attn_residual + hidden_states
528
+
529
+ hidden_states = self.layer_norm(hidden_states)
530
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
531
+ hidden_states = self.final_layer_norm(hidden_states)
532
+
533
+ outputs = (hidden_states,)
534
+
535
+ if output_attentions:
536
+ outputs += (attn_weights,)
537
+
538
+ return outputs
539
+
540
+
541
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->Data2VecAudio
542
+ class Data2VecAudioEncoder(nn.Module):
543
+ def __init__(self, config):
544
+ super().__init__()
545
+ self.config = config
546
+ self.pos_conv_embed = Data2VecAudioPositionalConvEmbedding(config)
547
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
548
+ self.dropout = nn.Dropout(config.hidden_dropout)
549
+ self.layers = nn.ModuleList([Data2VecAudioEncoderLayer(config) for _ in range(config.num_hidden_layers)])
550
+ self.gradient_checkpointing = False
551
+
552
+ def forward(
553
+ self,
554
+ hidden_states: torch.tensor,
555
+ attention_mask: Optional[torch.Tensor] = None,
556
+ output_attentions: bool = False,
557
+ output_hidden_states: bool = False,
558
+ return_dict: bool = True,
559
+ ):
560
+ all_hidden_states = () if output_hidden_states else None
561
+ all_self_attentions = () if output_attentions else None
562
+
563
+ if attention_mask is not None:
564
+ # make sure padded tokens output 0
565
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
566
+ hidden_states[~expand_attention_mask] = 0
567
+
568
+ # extend attention_mask
569
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
570
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
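+ # padded positions now carry the most negative representable value, so their scores
+ # vanish after the softmax inside the attention layers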
571
+ attention_mask = attention_mask.expand(
572
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
573
+ )
574
+
575
+ position_embeddings = self.pos_conv_embed(hidden_states)
576
+ hidden_states = hidden_states + position_embeddings
577
+ hidden_states = self.layer_norm(hidden_states)
578
+ hidden_states = self.dropout(hidden_states)
579
+
580
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
581
+
582
+ for layer in self.layers:
583
+ if output_hidden_states:
584
+ all_hidden_states = all_hidden_states + (hidden_states,)
585
+
586
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
587
+ dropout_probability = torch.rand([])
588
+
589
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
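+ # e.g. with config.layerdrop=0.1, each encoder layer is skipped with probability 0.1 during
+ # training; at inference time every layer always runs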
590
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
591
+ # under deepspeed zero3 all gpus must run in sync
592
+ if self.gradient_checkpointing and self.training:
593
+ layer_outputs = self._gradient_checkpointing_func(
594
+ layer.__call__,
595
+ hidden_states,
596
+ attention_mask,
597
+ output_attentions,
598
+ )
599
+ else:
600
+ layer_outputs = layer(
601
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
602
+ )
603
+ hidden_states = layer_outputs[0]
604
+
605
+ if skip_the_layer:
606
+ layer_outputs = (None, None)
607
+
608
+ if output_attentions:
609
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
610
+
611
+ if output_hidden_states:
612
+ all_hidden_states = all_hidden_states + (hidden_states,)
613
+
614
+ if not return_dict:
615
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
616
+ return BaseModelOutput(
617
+ last_hidden_state=hidden_states,
618
+ hidden_states=all_hidden_states,
619
+ attentions=all_self_attentions,
620
+ )
621
+
622
+
623
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->Data2VecAudio
624
+ class Data2VecAudioAdapter(nn.Module):
625
+ def __init__(self, config):
626
+ super().__init__()
627
+
628
+ # feature dim might need to be down-projected
629
+ if config.output_hidden_size != config.hidden_size:
630
+ self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
631
+ self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
632
+ else:
633
+ self.proj = self.proj_layer_norm = None
634
+
635
+ self.layers = nn.ModuleList(Data2VecAudioAdapterLayer(config) for _ in range(config.num_adapter_layers))
636
+ self.layerdrop = config.layerdrop
637
+
638
+ def forward(self, hidden_states):
639
+ # down project hidden_states if necessary
640
+ if self.proj is not None and self.proj_layer_norm is not None:
641
+ hidden_states = self.proj(hidden_states)
642
+ hidden_states = self.proj_layer_norm(hidden_states)
643
+
644
+ hidden_states = hidden_states.transpose(1, 2)
645
+
646
+ for layer in self.layers:
647
+ layerdrop_prob = np.random.random()
648
+ if not self.training or (layerdrop_prob > self.layerdrop):
649
+ hidden_states = layer(hidden_states)
650
+
651
+ hidden_states = hidden_states.transpose(1, 2)
652
+ return hidden_states
653
+
654
+
655
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->Data2VecAudio
656
+ class Data2VecAudioAdapterLayer(nn.Module):
657
+ def __init__(self, config):
658
+ super().__init__()
659
+ self.conv = nn.Conv1d(
660
+ config.output_hidden_size,
661
+ 2 * config.output_hidden_size,
662
+ config.adapter_kernel_size,
663
+ stride=config.adapter_stride,
664
+ padding=1,
665
+ )
666
+
667
+ def forward(self, hidden_states):
668
+ hidden_states = self.conv(hidden_states)
669
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
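+ # the gated linear unit halves the channel dimension back to output_hidden_size by
+ # multiplying the first half of the channels with a sigmoid of the second half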
670
+
671
+ return hidden_states
672
+
673
+
674
+ class Data2VecAudioPreTrainedModel(PreTrainedModel):
675
+ """
676
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
677
+ models.
678
+ """
679
+
680
+ config_class = Data2VecAudioConfig
681
+ base_model_prefix = "data2vec_audio"
682
+ main_input_name = "input_values"
683
+ supports_gradient_checkpointing = True
684
+
685
+ def _init_weights(self, module):
686
+ """Initialize the weights"""
687
+ if isinstance(module, Data2VecAudioFeatureProjection):
688
+ k = math.sqrt(1 / module.projection.in_features)
689
+ nn.init.uniform_(module.projection.weight, a=-k, b=k)
690
+ nn.init.uniform_(module.projection.bias, a=-k, b=k)
691
+ elif isinstance(module, Data2VecAudioPositionalConvLayer):
692
+ nn.init.constant_(module.conv.bias, 0)
693
+ elif isinstance(module, nn.Linear):
694
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
695
+
696
+ if module.bias is not None:
697
+ module.bias.data.zero_()
698
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
699
+ if module.bias is not None:
700
+ module.bias.data.zero_()
701
+ if module.weight is not None:
702
+ module.weight.data.fill_(1.0)
703
+ elif isinstance(module, nn.Conv1d):
704
+ nn.init.kaiming_normal_(module.weight)
705
+
706
+ if module.bias is not None:
707
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
708
+ nn.init.uniform_(module.bias, a=-k, b=k)
709
+
710
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel._get_feat_extract_output_lengths
711
+ def _get_feat_extract_output_lengths(
712
+ self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
713
+ ):
714
+ """
715
+ Computes the output length of the convolutional layers
716
+ """
717
+
718
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
719
+
720
+ def _conv_out_length(input_length, kernel_size, stride):
721
+ # 1D convolutional layer output length formula taken
722
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
723
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
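+ # e.g. kernel_size=10 and stride=5: an input of 16000 samples gives (16000 - 10) // 5 + 1 = 3199 frames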
724
+
725
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
726
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
727
+
728
+ if add_adapter:
729
+ for _ in range(self.config.num_adapter_layers):
730
+ input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
731
+
732
+ return input_lengths
733
+
734
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel._get_feature_vector_attention_mask
735
+ def _get_feature_vector_attention_mask(
736
+ self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
737
+ ):
738
+ # Effectively attention_mask.sum(-1), but not inplace to be able to run
739
+ # in inference mode.
740
+ non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
741
+
742
+ output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
743
+ output_lengths = output_lengths.to(torch.long)
744
+
745
+ batch_size = attention_mask.shape[0]
746
+
747
+ attention_mask = torch.zeros(
748
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
749
+ )
750
+ # these two operations make sure that all values before the output length indices are attended to
751
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
752
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
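+ # e.g. feature_vector_length=5 and output length 3: setting index 2 gives [0, 0, 1, 0, 0],
+ # which flip/cumsum/flip turns into [1, 1, 1, 0, 0]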
753
+ return attention_mask
754
+
755
+
756
+ DATA2VEC_AUDIO_START_DOCSTRING = r"""
757
+ Data2VecAudio was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and
758
+ Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and
759
+ Michael Auli.
760
+
761
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
762
+ library implements for all its models (such as downloading, saving, etc.).
763
+
764
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
765
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
766
+ behavior.
767
+
768
+ Parameters:
769
+ config ([`Data2VecAudioConfig`]): Model configuration class with all the parameters of the model.
770
+ Initializing with a config file does not load the weights associated with the model, only the
771
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
772
+ """
773
+
774
+
775
+ DATA2VEC_AUDIO_INPUTS_DOCSTRING = r"""
776
+ Args:
777
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
778
+ Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
779
+ into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
780
+ soundfile*). To prepare the array into *input_values*, the [`AutoProcessor`] should be used for padding and
781
+ conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
782
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
783
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
784
+ 1]`:
785
+
786
+ - 1 for tokens that are **not masked**,
787
+ - 0 for tokens that are **masked**.
788
+
789
+ [What are attention masks?](../glossary#attention-mask)
790
+
791
+ <Tip warning={true}>
792
+
793
+ `attention_mask` should be passed if the corresponding processor has `config.return_attention_mask ==
794
+ True`, which is the case for all pre-trained Data2Vec Audio models. Be aware that even with
795
+ `attention_mask`, zero-padded inputs will have slightly different outputs compared to non-padded inputs
796
+ because there is more than one convolutional layer in the positional encodings. For a more detailed
797
+ explanation, see [here](https://github.com/huggingface/transformers/issues/25621#issuecomment-1713759349).
798
+
799
+ </Tip>
800
+
801
+ output_attentions (`bool`, *optional*):
802
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
803
+ tensors for more detail.
804
+ output_hidden_states (`bool`, *optional*):
805
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
806
+ more detail.
807
+ return_dict (`bool`, *optional*):
808
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
809
+ """
810
+
811
+
812
+ @add_start_docstrings(
813
+ "The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top.",
814
+ DATA2VEC_AUDIO_START_DOCSTRING,
815
+ )
816
+ class Data2VecAudioModel(Data2VecAudioPreTrainedModel):
817
+ def __init__(self, config: Data2VecAudioConfig):
818
+ super().__init__(config)
819
+ self.config = config
820
+ self.feature_extractor = Data2VecAudioFeatureEncoder(config)
821
+ self.feature_projection = Data2VecAudioFeatureProjection(config)
822
+
823
+ # model only needs masking vector if mask prob is > 0.0
824
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
825
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
826
+
827
+ self.encoder = Data2VecAudioEncoder(config)
828
+
829
+ self.adapter = Data2VecAudioAdapter(config) if config.add_adapter else None
830
+
831
+ # Initialize weights and apply final processing
832
+ self.post_init()
833
+
834
+ def freeze_feature_encoder(self):
835
+ """
836
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
837
+ not be updated during training.
838
+ """
839
+ self.feature_extractor._freeze_parameters()
840
+
841
+ def _mask_hidden_states(
842
+ self,
843
+ hidden_states: torch.FloatTensor,
844
+ mask_time_indices: Optional[torch.FloatTensor] = None,
845
+ attention_mask: Optional[torch.LongTensor] = None,
846
+ ):
847
+ """
848
+ Masks extracted features along time axis and/or along feature axis according to
849
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
850
+ """
851
+
852
+ # `config.apply_spec_augment` can set masking to False
853
+ if not getattr(self.config, "apply_spec_augment", True):
854
+ return hidden_states
855
+
856
+ # generate indices & apply SpecAugment along time axis
857
+ batch_size, sequence_length, hidden_size = hidden_states.size()
858
+
859
+ if mask_time_indices is not None:
860
+ # apply SpecAugment along time axis with given mask_time_indices
861
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
862
+ elif self.config.mask_time_prob > 0 and self.training:
863
+ mask_time_indices = _compute_mask_indices(
864
+ (batch_size, sequence_length),
865
+ mask_prob=self.config.mask_time_prob,
866
+ mask_length=self.config.mask_time_length,
867
+ attention_mask=attention_mask,
868
+ min_masks=self.config.mask_time_min_masks,
869
+ )
870
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
871
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
872
+
873
+ if self.config.mask_feature_prob > 0 and self.training:
874
+ # generate indices & apply SpecAugment along feature axis
875
+ mask_feature_indices = _compute_mask_indices(
876
+ (batch_size, hidden_size),
877
+ mask_prob=self.config.mask_feature_prob,
878
+ mask_length=self.config.mask_feature_length,
879
+ min_masks=self.config.mask_feature_min_masks,
880
+ )
881
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
882
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
883
+ hidden_states[mask_feature_indices] = 0
884
+
885
+ return hidden_states
886
+
887
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
888
+ @add_code_sample_docstrings(
889
+ checkpoint=_CHECKPOINT_FOR_DOC,
890
+ output_type=Wav2Vec2BaseModelOutput,
891
+ config_class=_CONFIG_FOR_DOC,
892
+ modality="audio",
893
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
894
+ )
895
+ def forward(
896
+ self,
897
+ input_values: Optional[torch.Tensor],
898
+ attention_mask: Optional[torch.Tensor] = None,
899
+ mask_time_indices: Optional[torch.FloatTensor] = None,
900
+ output_attentions: Optional[bool] = None,
901
+ output_hidden_states: Optional[bool] = None,
902
+ return_dict: Optional[bool] = None,
903
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
904
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
905
+ output_hidden_states = (
906
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
907
+ )
908
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
909
+
910
+ extract_features = self.feature_extractor(input_values)
911
+ extract_features = extract_features.transpose(1, 2)
912
+
913
+ if attention_mask is not None:
914
+ # compute reduced attention_mask corresponding to feature vectors
915
+ attention_mask = self._get_feature_vector_attention_mask(
916
+ extract_features.shape[1], attention_mask, add_adapter=False
917
+ )
918
+
919
+ hidden_states, extract_features = self.feature_projection(extract_features)
920
+ hidden_states = self._mask_hidden_states(
921
+ hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
922
+ )
923
+
924
+ encoder_outputs = self.encoder(
925
+ hidden_states,
926
+ attention_mask=attention_mask,
927
+ output_attentions=output_attentions,
928
+ output_hidden_states=output_hidden_states,
929
+ return_dict=return_dict,
930
+ )
931
+
932
+ hidden_states = encoder_outputs[0]
933
+
934
+ if self.adapter is not None:
935
+ hidden_states = self.adapter(hidden_states)
936
+
937
+ if not return_dict:
938
+ return (hidden_states, extract_features) + encoder_outputs[1:]
939
+
940
+ return Wav2Vec2BaseModelOutput(
941
+ last_hidden_state=hidden_states,
942
+ extract_features=extract_features,
943
+ hidden_states=encoder_outputs.hidden_states,
944
+ attentions=encoder_outputs.attentions,
945
+ )
946
+
947
+
948
+ @add_start_docstrings(
949
+ """Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
950
+ DATA2VEC_AUDIO_START_DOCSTRING,
951
+ )
952
+ class Data2VecAudioForCTC(Data2VecAudioPreTrainedModel):
953
+ def __init__(self, config):
954
+ super().__init__(config)
955
+
956
+ self.data2vec_audio = Data2VecAudioModel(config)
957
+ self.dropout = nn.Dropout(config.final_dropout)
958
+
959
+ if config.vocab_size is None:
960
+ raise ValueError(
961
+ f"You are trying to instantiate {self.__class__} with a configuration that "
962
+ "does not define the vocabulary size of the language model head. Please "
963
+ "instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
964
+ "or define `vocab_size` of your model's configuration."
965
+ )
966
+ output_hidden_size = (
967
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
968
+ )
969
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
970
+
971
+ # Initialize weights and apply final processing
972
+ self.post_init()
973
+
974
+ def freeze_feature_extractor(self):
975
+ """
976
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
977
+ not be updated during training.
978
+ """
979
+ warnings.warn(
980
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
981
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
982
+ FutureWarning,
983
+ )
984
+ self.freeze_feature_encoder()
985
+
986
+ def freeze_feature_encoder(self):
987
+ """
988
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
989
+ not be updated during training.
990
+ """
991
+ self.data2vec_audio.feature_extractor._freeze_parameters()
992
+
993
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
994
+ @add_code_sample_docstrings(
995
+ checkpoint=_CHECKPOINT_FOR_DOC,
996
+ output_type=CausalLMOutput,
997
+ config_class=_CONFIG_FOR_DOC,
998
+ expected_output=_CTC_EXPECTED_OUTPUT,
999
+ expected_loss=_CTC_EXPECTED_LOSS,
1000
+ )
1001
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward with wav2vec2->data2vec_audio
1002
+ def forward(
1003
+ self,
1004
+ input_values: Optional[torch.Tensor],
1005
+ attention_mask: Optional[torch.Tensor] = None,
1006
+ output_attentions: Optional[bool] = None,
1007
+ output_hidden_states: Optional[bool] = None,
1008
+ return_dict: Optional[bool] = None,
1009
+ labels: Optional[torch.Tensor] = None,
1010
+ ) -> Union[Tuple, CausalLMOutput]:
1011
+ r"""
1012
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
1013
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
1014
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
1015
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
1016
+ config.vocab_size - 1]`.
1017
+ """
1018
+
1019
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1020
+
1021
+ outputs = self.data2vec_audio(
1022
+ input_values,
1023
+ attention_mask=attention_mask,
1024
+ output_attentions=output_attentions,
1025
+ output_hidden_states=output_hidden_states,
1026
+ return_dict=return_dict,
1027
+ )
1028
+
1029
+ hidden_states = outputs[0]
1030
+ hidden_states = self.dropout(hidden_states)
1031
+
1032
+ logits = self.lm_head(hidden_states)
1033
+
1034
+ loss = None
1035
+ if labels is not None:
1036
+ if labels.max() >= self.config.vocab_size:
1037
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
1038
+
1039
+ # retrieve loss input_lengths from attention_mask
1040
+ attention_mask = (
1041
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
1042
+ )
1043
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
1044
+
1045
+ # assuming that padded tokens are filled with -100
1046
+ # when not being attended to
1047
+ labels_mask = labels >= 0
1048
+ target_lengths = labels_mask.sum(-1)
1049
+ flattened_targets = labels.masked_select(labels_mask)
1050
+
1051
+ # ctc_loss doesn't support fp16
1052
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
1053
+
1054
+ with torch.backends.cudnn.flags(enabled=False):
1055
+ loss = nn.functional.ctc_loss(
1056
+ log_probs,
1057
+ flattened_targets,
1058
+ input_lengths,
1059
+ target_lengths,
1060
+ blank=self.config.pad_token_id,
1061
+ reduction=self.config.ctc_loss_reduction,
1062
+ zero_infinity=self.config.ctc_zero_infinity,
1063
+ )
1064
+
1065
+ if not return_dict:
1066
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1067
+ return ((loss,) + output) if loss is not None else output
1068
+
1069
+ return CausalLMOutput(
1070
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1071
+ )
1072
+
1073
+
1074
+ @add_start_docstrings(
1075
+ """
1076
+ Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
1077
+ like SUPERB Keyword Spotting.
1078
+ """,
1079
+ DATA2VEC_AUDIO_START_DOCSTRING,
1080
+ )
1081
+ class Data2VecAudioForSequenceClassification(Data2VecAudioPreTrainedModel):
1082
+ def __init__(self, config):
1083
+ super().__init__(config)
1084
+
1085
+ if hasattr(config, "add_adapter") and config.add_adapter:
1086
+ raise ValueError(
1087
+ "Sequence classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)"
1088
+ )
1089
+ self.data2vec_audio = Data2VecAudioModel(config)
1090
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1091
+ if config.use_weighted_layer_sum:
1092
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1093
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
1094
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
1095
+
1096
+ # Initialize weights and apply final processing
1097
+ self.post_init()
1098
+
1099
+ def freeze_feature_extractor(self):
1100
+ """
1101
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1102
+ not be updated during training.
1103
+ """
1104
+ warnings.warn(
1105
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1106
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1107
+ FutureWarning,
1108
+ )
1109
+ self.freeze_feature_encoder()
1110
+
1111
+ def freeze_feature_encoder(self):
1112
+ """
1113
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1114
+ not be updated during training.
1115
+ """
1116
+ self.data2vec_audio.feature_extractor._freeze_parameters()
1117
+
1118
+ def freeze_base_model(self):
1119
+ """
1120
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1121
+ be updated during training. Only the classification head will be updated.
1122
+ """
1123
+ for param in self.data2vec_audio.parameters():
1124
+ param.requires_grad = False
1125
+
1126
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
1127
+ @add_code_sample_docstrings(
1128
+ checkpoint=_CHECKPOINT_FOR_DOC,
1129
+ output_type=SequenceClassifierOutput,
1130
+ config_class=_CONFIG_FOR_DOC,
1131
+ modality="audio",
1132
+ )
1133
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with wav2vec2->data2vec_audio
1134
+ def forward(
1135
+ self,
1136
+ input_values: Optional[torch.Tensor],
1137
+ attention_mask: Optional[torch.Tensor] = None,
1138
+ output_attentions: Optional[bool] = None,
1139
+ output_hidden_states: Optional[bool] = None,
1140
+ return_dict: Optional[bool] = None,
1141
+ labels: Optional[torch.Tensor] = None,
1142
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1143
+ r"""
1144
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1145
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1146
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1147
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1148
+ """
1149
+
1150
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1151
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1152
+
1153
+ outputs = self.data2vec_audio(
1154
+ input_values,
1155
+ attention_mask=attention_mask,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ )
1160
+
1161
+ if self.config.use_weighted_layer_sum:
1162
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1163
+ hidden_states = torch.stack(hidden_states, dim=1)
1164
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1165
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1166
+ else:
1167
+ hidden_states = outputs[0]
1168
+
1169
+ hidden_states = self.projector(hidden_states)
1170
+ if attention_mask is None:
1171
+ pooled_output = hidden_states.mean(dim=1)
1172
+ else:
1173
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
1174
+ hidden_states[~padding_mask] = 0.0
1175
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
1176
+
1177
+ logits = self.classifier(pooled_output)
1178
+
1179
+ loss = None
1180
+ if labels is not None:
1181
+ loss_fct = CrossEntropyLoss()
1182
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1183
+
1184
+ if not return_dict:
1185
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1186
+ return ((loss,) + output) if loss is not None else output
1187
+
1188
+ return SequenceClassifierOutput(
1189
+ loss=loss,
1190
+ logits=logits,
1191
+ hidden_states=outputs.hidden_states,
1192
+ attentions=outputs.attentions,
1193
+ )
1194
+
1195
+
1196
+ @add_start_docstrings(
1197
+ """
1198
+ Data2VecAudio Model with a frame classification head on top for tasks like Speaker Diarization.
1199
+ """,
1200
+ DATA2VEC_AUDIO_START_DOCSTRING,
1201
+ )
1202
+ class Data2VecAudioForAudioFrameClassification(Data2VecAudioPreTrainedModel):
1203
+ def __init__(self, config):
1204
+ super().__init__(config)
1205
+
1206
+ if hasattr(config, "add_adapter") and config.add_adapter:
1207
+ raise ValueError(
1208
+ "Audio frame classification does not support the use of Data2VecAudio adapters"
1209
+ " (config.add_adapter=True)"
1210
+ )
1211
+ self.data2vec_audio = Data2VecAudioModel(config)
1212
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1213
+ if config.use_weighted_layer_sum:
1214
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1215
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1216
+ self.num_labels = config.num_labels
1217
+
1218
+ self.init_weights()
1219
+
1220
+ def freeze_feature_extractor(self):
1221
+ """
1222
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1223
+ not be updated during training.
1224
+ """
1225
+ warnings.warn(
1226
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1227
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1228
+ FutureWarning,
1229
+ )
1230
+ self.freeze_feature_encoder()
1231
+
1232
+ def freeze_feature_encoder(self):
1233
+ """
1234
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1235
+ not be updated during training.
1236
+ """
1237
+ self.data2vec_audio.feature_extractor._freeze_parameters()
1238
+
1239
+ def freeze_base_model(self):
1240
+ """
1241
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1242
+ be updated during training. Only the classification head will be updated.
1243
+ """
1244
+ for param in self.data2vec_audio.parameters():
1245
+ param.requires_grad = False
1246
+
1247
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
1248
+ @add_code_sample_docstrings(
1249
+ checkpoint=_CHECKPOINT_FOR_DOC,
1250
+ output_type=TokenClassifierOutput,
1251
+ config_class=_CONFIG_FOR_DOC,
1252
+ modality="audio",
1253
+ )
1254
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification.forward with wav2vec2->data2vec_audio
1255
+ def forward(
1256
+ self,
1257
+ input_values: Optional[torch.Tensor],
1258
+ attention_mask: Optional[torch.Tensor] = None,
1259
+ labels: Optional[torch.Tensor] = None,
1260
+ output_attentions: Optional[bool] = None,
1261
+ output_hidden_states: Optional[bool] = None,
1262
+ return_dict: Optional[bool] = None,
1263
+ ) -> Union[Tuple, TokenClassifierOutput]:
1264
+ r"""
1265
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1266
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1267
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1268
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1269
+ """
1270
+
1271
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1272
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1273
+
1274
+ outputs = self.data2vec_audio(
1275
+ input_values,
1276
+ attention_mask=attention_mask,
1277
+ output_attentions=output_attentions,
1278
+ output_hidden_states=output_hidden_states,
1279
+ return_dict=return_dict,
1280
+ )
1281
+
1282
+ if self.config.use_weighted_layer_sum:
1283
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1284
+ hidden_states = torch.stack(hidden_states, dim=1)
1285
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1286
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1287
+ else:
1288
+ hidden_states = outputs[0]
1289
+
1290
+ logits = self.classifier(hidden_states)
1291
+
1292
+ loss = None
1293
+ if labels is not None:
1294
+ loss_fct = CrossEntropyLoss()
1295
+ loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
1296
+
1297
+ if not return_dict:
1298
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1299
+ return output
1300
+
1301
+ return TokenClassifierOutput(
1302
+ loss=loss,
1303
+ logits=logits,
1304
+ hidden_states=outputs.hidden_states,
1305
+ attentions=outputs.attentions,
1306
+ )
1307
+
1308
+
1309
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss
1310
+ class AMSoftmaxLoss(nn.Module):
1311
+ def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
1312
+ super(AMSoftmaxLoss, self).__init__()
1313
+ self.scale = scale
1314
+ self.margin = margin
1315
+ self.num_labels = num_labels
1316
+ self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
1317
+ self.loss = nn.CrossEntropyLoss()
1318
+
1319
+ def forward(self, hidden_states, labels):
1320
+ labels = labels.flatten()
1321
+ weight = nn.functional.normalize(self.weight, dim=0)
1322
+ hidden_states = nn.functional.normalize(hidden_states, dim=1)
1323
+ cos_theta = torch.mm(hidden_states, weight)
1324
+ psi = cos_theta - self.margin
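+ # additive margin softmax: the margin is subtracted from the target-class cosine only (see the
+ # torch.where below), e.g. a target cosine of 0.8 becomes 0.4 with the default margin of 0.4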
1325
+
1326
+ onehot = nn.functional.one_hot(labels, self.num_labels)
1327
+ logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
1328
+ loss = self.loss(logits, labels)
1329
+
1330
+ return loss
1331
+
1332
+
1333
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer
1334
+ class TDNNLayer(nn.Module):
1335
+ def __init__(self, config, layer_id=0):
1336
+ super().__init__()
1337
+ self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
1338
+ self.out_conv_dim = config.tdnn_dim[layer_id]
1339
+ self.kernel_size = config.tdnn_kernel[layer_id]
1340
+ self.dilation = config.tdnn_dilation[layer_id]
1341
+
1342
+ self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
1343
+ self.activation = nn.ReLU()
1344
+
1345
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
1346
+ if is_peft_available():
1347
+ from peft.tuners.lora import LoraLayer
1348
+
1349
+ if isinstance(self.kernel, LoraLayer):
1350
+ warnings.warn(
1351
+ "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
1352
+ "You should exclude TDNNLayer from LoRA's target modules.",
1353
+ )
1354
+
1355
+ # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
1356
+ hidden_states = hidden_states.transpose(1, 2)
1357
+ weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
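+ # reshape the flat nn.Linear weight of shape (out_conv_dim, kernel_size * in_conv_dim) into the
+ # (out_channels, in_channels, kernel_size) layout expected by nn.functional.conv1d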
1358
+ hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
1359
+ hidden_states = hidden_states.transpose(1, 2)
1360
+
1361
+ hidden_states = self.activation(hidden_states)
1362
+ return hidden_states
1363
+
1364
+
1365
+ @add_start_docstrings(
1366
+ """
1367
+ Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.
1368
+ """,
1369
+ DATA2VEC_AUDIO_START_DOCSTRING,
1370
+ )
1371
+ class Data2VecAudioForXVector(Data2VecAudioPreTrainedModel):
1372
+ def __init__(self, config):
1373
+ super().__init__(config)
1374
+
1375
+ self.data2vec_audio = Data2VecAudioModel(config)
1376
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1377
+ if config.use_weighted_layer_sum:
1378
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1379
+ self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
1380
+
1381
+ tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
1382
+ self.tdnn = nn.ModuleList(tdnn_layers)
1383
+
1384
+ self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
1385
+ self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
1386
+
1387
+ self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
1388
+
1389
+ self.init_weights()
1390
+
1391
+ def freeze_feature_extractor(self):
1392
+ """
1393
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1394
+ not be updated during training.
1395
+ """
1396
+ warnings.warn(
1397
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1398
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1399
+ FutureWarning,
1400
+ )
1401
+ self.freeze_feature_encoder()
1402
+
1403
+ def freeze_feature_encoder(self):
1404
+ """
1405
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1406
+ not be updated during training.
1407
+ """
1408
+ self.data2vec_audio.feature_extractor._freeze_parameters()
1409
+
1410
+ def freeze_base_model(self):
1411
+ """
1412
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1413
+ be updated during training. Only the classification head will be updated.
1414
+ """
1415
+ for param in self.data2vec_audio.parameters():
1416
+ param.requires_grad = False
1417
+
1418
+ def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
1419
+ """
1420
+ Computes the output length of the TDNN layers
1421
+ """
1422
+
1423
+ def _conv_out_length(input_length, kernel_size, stride):
1424
+ # 1D convolutional layer output length formula taken
1425
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1426
+ return (input_length - kernel_size) // stride + 1
1427
+
1428
+ for kernel_size in self.config.tdnn_kernel:
1429
+ input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
1430
+
1431
+ return input_lengths
1432
+
1433
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
1434
+ @add_code_sample_docstrings(
1435
+ checkpoint=_CHECKPOINT_FOR_DOC,
1436
+ output_type=XVectorOutput,
1437
+ config_class=_CONFIG_FOR_DOC,
1438
+ modality="audio",
1439
+ )
1440
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector.forward with wav2vec2->data2vec_audio
1441
+ def forward(
1442
+ self,
1443
+ input_values: Optional[torch.Tensor],
1444
+ attention_mask: Optional[torch.Tensor] = None,
1445
+ output_attentions: Optional[bool] = None,
1446
+ output_hidden_states: Optional[bool] = None,
1447
+ return_dict: Optional[bool] = None,
1448
+ labels: Optional[torch.Tensor] = None,
1449
+ ) -> Union[Tuple, XVectorOutput]:
1450
+ r"""
1451
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1452
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1453
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1454
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1455
+ """
1456
+
1457
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1458
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1459
+
1460
+ outputs = self.data2vec_audio(
1461
+ input_values,
1462
+ attention_mask=attention_mask,
1463
+ output_attentions=output_attentions,
1464
+ output_hidden_states=output_hidden_states,
1465
+ return_dict=return_dict,
1466
+ )
1467
+
1468
+ if self.config.use_weighted_layer_sum:
1469
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1470
+ hidden_states = torch.stack(hidden_states, dim=1)
1471
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1472
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1473
+ else:
1474
+ hidden_states = outputs[0]
1475
+
1476
+ hidden_states = self.projector(hidden_states)
1477
+
1478
+ for tdnn_layer in self.tdnn:
1479
+ hidden_states = tdnn_layer(hidden_states)
1480
+
1481
+ # Statistic Pooling
1482
+ if attention_mask is None:
1483
+ mean_features = hidden_states.mean(dim=1)
1484
+ std_features = hidden_states.std(dim=1)
1485
+ else:
1486
+ feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
1487
+ tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
1488
+ mean_features = []
1489
+ std_features = []
1490
+ for i, length in enumerate(tdnn_output_lengths):
1491
+ mean_features.append(hidden_states[i, :length].mean(dim=0))
1492
+ std_features.append(hidden_states[i, :length].std(dim=0))
1493
+ mean_features = torch.stack(mean_features)
1494
+ std_features = torch.stack(std_features)
1495
+ statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
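+ # mean and standard deviation are concatenated per utterance, which is why the
+ # feature_extractor projection above takes tdnn_dim[-1] * 2 input features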
1496
+
1497
+ output_embeddings = self.feature_extractor(statistic_pooling)
1498
+ logits = self.classifier(output_embeddings)
1499
+
1500
+ loss = None
1501
+ if labels is not None:
1502
+ loss = self.objective(logits, labels)
1503
+
1504
+ if not return_dict:
1505
+ output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
1506
+ return ((loss,) + output) if loss is not None else output
1507
+
1508
+ return XVectorOutput(
1509
+ loss=loss,
1510
+ logits=logits,
1511
+ embeddings=output_embeddings,
1512
+ hidden_states=outputs.hidden_states,
1513
+ attentions=outputs.attentions,
1514
+ )
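When `attention_mask` is provided, the statistic-pooling branch above averages only the valid frames of each utterance before the x-vector projection. A minimal sketch of that pooling step in isolation, with made-up shapes and lengths:

```python
import torch

# Hypothetical TDNN output: 2 sequences, 5 frames each, 4 features per frame.
hidden_states = torch.randn(2, 5, 4)
# Assumed per-sequence valid lengths after the TDNN stack (derived from the attention mask).
lengths = [5, 3]

mean_features, std_features = [], []
for i, length in enumerate(lengths):
    # Pool only the first `length` frames of sequence i, as in the masked branch above.
    mean_features.append(hidden_states[i, :length].mean(dim=0))
    std_features.append(hidden_states[i, :length].std(dim=0))

# Concatenate per-sequence mean and std into one fixed-size vector per utterance.
statistic_pooling = torch.cat([torch.stack(mean_features), torch.stack(std_features)], dim=-1)
print(statistic_pooling.shape)  # torch.Size([2, 8])
```

Concatenating mean and standard deviation yields a fixed-size summary vector regardless of how long each utterance is, which is what lets the feature extractor and classifier after it operate on variable-length audio.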
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_text.py ADDED
@@ -0,0 +1,1557 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Data2VecText model."""
16
+
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN, gelu
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ CausalLMOutputWithCrossAttentions,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import (
39
+ add_code_sample_docstrings,
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_data2vec_text import Data2VecTextConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ _HIDDEN_STATES_START_POSITION = 2
52
+
53
+ # General docstring
54
+ _CHECKPOINT_FOR_DOC = "facebook/data2vec-text-base"
55
+ _CONFIG_FOR_DOC = "Data2VecTextConfig"
56
+
57
+
58
+ from ..deprecated._archive_maps import DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Data2VecText
62
+ class Data2VecTextForTextEmbeddings(nn.Module):
63
+ """
64
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
65
+ """
66
+
67
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
68
+ def __init__(self, config):
69
+ super().__init__()
70
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
71
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
72
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
73
+
74
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
75
+ # any TensorFlow checkpoint file
76
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
77
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
78
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
79
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
80
+ self.register_buffer(
81
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
82
+ )
83
+ self.register_buffer(
84
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
85
+ )
86
+
87
+ # End copy
88
+ self.padding_idx = config.pad_token_id
89
+ self.position_embeddings = nn.Embedding(
90
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
91
+ )
92
+
93
+ def forward(
94
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
95
+ ):
96
+ if position_ids is None:
97
+ if input_ids is not None:
98
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
99
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
100
+ else:
101
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
102
+
103
+ if input_ids is not None:
104
+ input_shape = input_ids.size()
105
+ else:
106
+ input_shape = inputs_embeds.size()[:-1]
107
+
108
+ seq_length = input_shape[1]
109
+
110
+ # Set token_type_ids to the registered buffer (all zeros) defined in the constructor. This usually occurs
111
+ # when token_type_ids are auto-generated; the registered buffer lets users trace the model without passing
112
+ # token_type_ids and resolves issue #5664
113
+ if token_type_ids is None:
114
+ if hasattr(self, "token_type_ids"):
115
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
116
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
117
+ token_type_ids = buffered_token_type_ids_expanded
118
+ else:
119
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
120
+
121
+ if inputs_embeds is None:
122
+ inputs_embeds = self.word_embeddings(input_ids)
123
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
124
+
125
+ embeddings = inputs_embeds + token_type_embeddings
126
+ if self.position_embedding_type == "absolute":
127
+ position_embeddings = self.position_embeddings(position_ids)
128
+ embeddings += position_embeddings
129
+ embeddings = self.LayerNorm(embeddings)
130
+ embeddings = self.dropout(embeddings)
131
+ return embeddings
132
+
133
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
134
+ """
135
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
136
+
137
+ Args:
138
+ inputs_embeds: torch.Tensor
139
+
140
+ Returns: torch.Tensor
141
+ """
142
+ input_shape = inputs_embeds.size()[:-1]
143
+ sequence_length = input_shape[1]
144
+
145
+ position_ids = torch.arange(
146
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
147
+ )
148
+ return position_ids.unsqueeze(0).expand(input_shape)
149
+
150
+
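The embedding module above delegates to `create_position_ids_from_input_ids` (defined later in this file) so that padded positions keep `padding_idx` as their position id. A minimal sketch of that logic, with an assumed `padding_idx` of 1:

```python
import torch

padding_idx = 1  # assumed pad token id, as in RoBERTa-style vocabularies
input_ids = torch.tensor([[0, 7, 9, 2, 1, 1]])  # last two tokens are padding

# Non-pad tokens get consecutive positions starting at padding_idx + 1;
# pad tokens keep padding_idx as their position id.
mask = input_ids.ne(padding_idx).int()
position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 5, 1, 1]])
```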
151
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Data2VecText
152
+ class Data2VecTextSelfAttention(nn.Module):
153
+ def __init__(self, config, position_embedding_type=None):
154
+ super().__init__()
155
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
156
+ raise ValueError(
157
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
158
+ f"heads ({config.num_attention_heads})"
159
+ )
160
+
161
+ self.num_attention_heads = config.num_attention_heads
162
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
163
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
164
+
165
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
166
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
167
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
168
+
169
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
170
+ self.position_embedding_type = position_embedding_type or getattr(
171
+ config, "position_embedding_type", "absolute"
172
+ )
173
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
174
+ self.max_position_embeddings = config.max_position_embeddings
175
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
176
+
177
+ self.is_decoder = config.is_decoder
178
+
179
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
180
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
181
+ x = x.view(new_x_shape)
182
+ return x.permute(0, 2, 1, 3)
183
+
184
+ def forward(
185
+ self,
186
+ hidden_states: torch.Tensor,
187
+ attention_mask: Optional[torch.FloatTensor] = None,
188
+ head_mask: Optional[torch.FloatTensor] = None,
189
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
190
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
191
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
192
+ output_attentions: Optional[bool] = False,
193
+ ) -> Tuple[torch.Tensor]:
194
+ mixed_query_layer = self.query(hidden_states)
195
+
196
+ # If this is instantiated as a cross-attention module, the keys
197
+ # and values come from an encoder; the attention mask needs to be
198
+ # such that the encoder's padding tokens are not attended to.
199
+ is_cross_attention = encoder_hidden_states is not None
200
+
201
+ if is_cross_attention and past_key_value is not None:
202
+ # reuse k,v, cross_attentions
203
+ key_layer = past_key_value[0]
204
+ value_layer = past_key_value[1]
205
+ attention_mask = encoder_attention_mask
206
+ elif is_cross_attention:
207
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
208
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
209
+ attention_mask = encoder_attention_mask
210
+ elif past_key_value is not None:
211
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
212
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
213
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
214
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
215
+ else:
216
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
217
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
218
+
219
+ query_layer = self.transpose_for_scores(mixed_query_layer)
220
+
221
+ use_cache = past_key_value is not None
222
+ if self.is_decoder:
223
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
224
+ # Further calls to cross_attention layer can then reuse all cross-attention
225
+ # key/value_states (first "if" case)
226
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
227
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
228
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
229
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
230
+ past_key_value = (key_layer, value_layer)
231
+
232
+ # Take the dot product between "query" and "key" to get the raw attention scores.
233
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
234
+
235
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
236
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
237
+ if use_cache:
238
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
239
+ -1, 1
240
+ )
241
+ else:
242
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
243
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
244
+ distance = position_ids_l - position_ids_r
245
+
246
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
247
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
248
+
249
+ if self.position_embedding_type == "relative_key":
250
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
251
+ attention_scores = attention_scores + relative_position_scores
252
+ elif self.position_embedding_type == "relative_key_query":
253
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
254
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
255
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
256
+
257
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
258
+ if attention_mask is not None:
259
+ # Apply the attention mask (precomputed for all layers in the Data2VecTextModel forward() function)
260
+ attention_scores = attention_scores + attention_mask
261
+
262
+ # Normalize the attention scores to probabilities.
263
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
264
+
265
+ # This is actually dropping out entire tokens to attend to, which might
266
+ # seem a bit unusual, but is taken from the original Transformer paper.
267
+ attention_probs = self.dropout(attention_probs)
268
+
269
+ # Mask heads if we want to
270
+ if head_mask is not None:
271
+ attention_probs = attention_probs * head_mask
272
+
273
+ context_layer = torch.matmul(attention_probs, value_layer)
274
+
275
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
276
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
277
+ context_layer = context_layer.view(new_context_layer_shape)
278
+
279
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
280
+
281
+ if self.is_decoder:
282
+ outputs = outputs + (past_key_value,)
283
+ return outputs
284
+
285
+
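`transpose_for_scores` above splits the flat query/key/value projections into per-head blocks before the scaled dot-product. A small self-contained sketch of that reshape and scaling, with made-up sizes:

```python
import math
import torch

batch, seq_len, num_heads, head_size = 2, 6, 4, 8
hidden = torch.randn(batch, seq_len, num_heads * head_size)

# (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
def split_heads(x):
    return x.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)

query, key = split_heads(hidden), split_heads(hidden)
scores = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(head_size)
probs = torch.nn.functional.softmax(scores, dim=-1)
print(probs.shape)  # torch.Size([2, 4, 6, 6]) -> one attention map per head
```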
286
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
287
+ class Data2VecTextSelfOutput(nn.Module):
288
+ def __init__(self, config):
289
+ super().__init__()
290
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
291
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
292
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
293
+
294
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
295
+ hidden_states = self.dense(hidden_states)
296
+ hidden_states = self.dropout(hidden_states)
297
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
298
+ return hidden_states
299
+
300
+
301
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Data2VecText
302
+ class Data2VecTextAttention(nn.Module):
303
+ def __init__(self, config, position_embedding_type=None):
304
+ super().__init__()
305
+ self.self = Data2VecTextSelfAttention(config, position_embedding_type=position_embedding_type)
306
+ self.output = Data2VecTextSelfOutput(config)
307
+ self.pruned_heads = set()
308
+
309
+ def prune_heads(self, heads):
310
+ if len(heads) == 0:
311
+ return
312
+ heads, index = find_pruneable_heads_and_indices(
313
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
314
+ )
315
+
316
+ # Prune linear layers
317
+ self.self.query = prune_linear_layer(self.self.query, index)
318
+ self.self.key = prune_linear_layer(self.self.key, index)
319
+ self.self.value = prune_linear_layer(self.self.value, index)
320
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
321
+
322
+ # Update hyper params and store pruned heads
323
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
324
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
325
+ self.pruned_heads = self.pruned_heads.union(heads)
326
+
327
+ def forward(
328
+ self,
329
+ hidden_states: torch.Tensor,
330
+ attention_mask: Optional[torch.FloatTensor] = None,
331
+ head_mask: Optional[torch.FloatTensor] = None,
332
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
333
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
334
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
335
+ output_attentions: Optional[bool] = False,
336
+ ) -> Tuple[torch.Tensor]:
337
+ self_outputs = self.self(
338
+ hidden_states,
339
+ attention_mask,
340
+ head_mask,
341
+ encoder_hidden_states,
342
+ encoder_attention_mask,
343
+ past_key_value,
344
+ output_attentions,
345
+ )
346
+ attention_output = self.output(self_outputs[0], hidden_states)
347
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
348
+ return outputs
349
+
350
+
351
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
352
+ class Data2VecTextIntermediate(nn.Module):
353
+ def __init__(self, config):
354
+ super().__init__()
355
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
356
+ if isinstance(config.hidden_act, str):
357
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
358
+ else:
359
+ self.intermediate_act_fn = config.hidden_act
360
+
361
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
362
+ hidden_states = self.dense(hidden_states)
363
+ hidden_states = self.intermediate_act_fn(hidden_states)
364
+ return hidden_states
365
+
366
+
367
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
368
+ class Data2VecTextOutput(nn.Module):
369
+ def __init__(self, config):
370
+ super().__init__()
371
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
372
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
373
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
374
+
375
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
376
+ hidden_states = self.dense(hidden_states)
377
+ hidden_states = self.dropout(hidden_states)
378
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
379
+ return hidden_states
380
+
381
+
382
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Data2VecText
383
+ class Data2VecTextLayer(nn.Module):
384
+ def __init__(self, config):
385
+ super().__init__()
386
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
387
+ self.seq_len_dim = 1
388
+ self.attention = Data2VecTextAttention(config)
389
+ self.is_decoder = config.is_decoder
390
+ self.add_cross_attention = config.add_cross_attention
391
+ if self.add_cross_attention:
392
+ if not self.is_decoder:
393
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
394
+ self.crossattention = Data2VecTextAttention(config, position_embedding_type="absolute")
395
+ self.intermediate = Data2VecTextIntermediate(config)
396
+ self.output = Data2VecTextOutput(config)
397
+
398
+ def forward(
399
+ self,
400
+ hidden_states: torch.Tensor,
401
+ attention_mask: Optional[torch.FloatTensor] = None,
402
+ head_mask: Optional[torch.FloatTensor] = None,
403
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
404
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
405
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
406
+ output_attentions: Optional[bool] = False,
407
+ ) -> Tuple[torch.Tensor]:
408
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
409
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
410
+ self_attention_outputs = self.attention(
411
+ hidden_states,
412
+ attention_mask,
413
+ head_mask,
414
+ output_attentions=output_attentions,
415
+ past_key_value=self_attn_past_key_value,
416
+ )
417
+ attention_output = self_attention_outputs[0]
418
+
419
+ # if decoder, the last output is tuple of self-attn cache
420
+ if self.is_decoder:
421
+ outputs = self_attention_outputs[1:-1]
422
+ present_key_value = self_attention_outputs[-1]
423
+ else:
424
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
425
+
426
+ cross_attn_present_key_value = None
427
+ if self.is_decoder and encoder_hidden_states is not None:
428
+ if not hasattr(self, "crossattention"):
429
+ raise ValueError(
430
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
431
+ " by setting `config.add_cross_attention=True`"
432
+ )
433
+
434
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
435
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
436
+ cross_attention_outputs = self.crossattention(
437
+ attention_output,
438
+ attention_mask,
439
+ head_mask,
440
+ encoder_hidden_states,
441
+ encoder_attention_mask,
442
+ cross_attn_past_key_value,
443
+ output_attentions,
444
+ )
445
+ attention_output = cross_attention_outputs[0]
446
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
447
+
448
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
449
+ cross_attn_present_key_value = cross_attention_outputs[-1]
450
+ present_key_value = present_key_value + cross_attn_present_key_value
451
+
452
+ layer_output = apply_chunking_to_forward(
453
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
454
+ )
455
+ outputs = (layer_output,) + outputs
456
+
457
+ # if decoder, return the attn key/values as the last output
458
+ if self.is_decoder:
459
+ outputs = outputs + (present_key_value,)
460
+
461
+ return outputs
462
+
463
+ def feed_forward_chunk(self, attention_output):
464
+ intermediate_output = self.intermediate(attention_output)
465
+ layer_output = self.output(intermediate_output, attention_output)
466
+ return layer_output
467
+
468
+
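The layer above runs its feed-forward block through `apply_chunking_to_forward`, which slices the sequence dimension into chunks to cap peak memory. A rough, simplified equivalent of that behaviour (the toy feed-forward and sizes are illustrative, not the real intermediate/output pair):

```python
import torch

def chunked_feed_forward(forward_fn, chunk_size, seq_len_dim, attention_output):
    # With chunk_size == 0 the whole tensor is processed in one pass, as in the layer above.
    if chunk_size == 0:
        return forward_fn(attention_output)
    num_chunks = attention_output.shape[seq_len_dim] // chunk_size
    chunks = attention_output.chunk(num_chunks, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)

ff = torch.nn.Linear(16, 16)          # stand-in for intermediate -> activation -> output
x = torch.randn(2, 8, 16)
out = chunked_feed_forward(ff, chunk_size=4, seq_len_dim=1, attention_output=x)
print(out.shape)  # torch.Size([2, 8, 16]), identical result to the unchunked pass
```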
469
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Data2VecText
470
+ class Data2VecTextEncoder(nn.Module):
471
+ def __init__(self, config):
472
+ super().__init__()
473
+ self.config = config
474
+ self.layer = nn.ModuleList([Data2VecTextLayer(config) for _ in range(config.num_hidden_layers)])
475
+ self.gradient_checkpointing = False
476
+
477
+ def forward(
478
+ self,
479
+ hidden_states: torch.Tensor,
480
+ attention_mask: Optional[torch.FloatTensor] = None,
481
+ head_mask: Optional[torch.FloatTensor] = None,
482
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
483
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
484
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
485
+ use_cache: Optional[bool] = None,
486
+ output_attentions: Optional[bool] = False,
487
+ output_hidden_states: Optional[bool] = False,
488
+ return_dict: Optional[bool] = True,
489
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
490
+ all_hidden_states = () if output_hidden_states else None
491
+ all_self_attentions = () if output_attentions else None
492
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
493
+
494
+ if self.gradient_checkpointing and self.training:
495
+ if use_cache:
496
+ logger.warning_once(
497
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
498
+ )
499
+ use_cache = False
500
+
501
+ next_decoder_cache = () if use_cache else None
502
+ for i, layer_module in enumerate(self.layer):
503
+ if output_hidden_states:
504
+ all_hidden_states = all_hidden_states + (hidden_states,)
505
+
506
+ layer_head_mask = head_mask[i] if head_mask is not None else None
507
+ past_key_value = past_key_values[i] if past_key_values is not None else None
508
+
509
+ if self.gradient_checkpointing and self.training:
510
+ layer_outputs = self._gradient_checkpointing_func(
511
+ layer_module.__call__,
512
+ hidden_states,
513
+ attention_mask,
514
+ layer_head_mask,
515
+ encoder_hidden_states,
516
+ encoder_attention_mask,
517
+ past_key_value,
518
+ output_attentions,
519
+ )
520
+ else:
521
+ layer_outputs = layer_module(
522
+ hidden_states,
523
+ attention_mask,
524
+ layer_head_mask,
525
+ encoder_hidden_states,
526
+ encoder_attention_mask,
527
+ past_key_value,
528
+ output_attentions,
529
+ )
530
+
531
+ hidden_states = layer_outputs[0]
532
+ if use_cache:
533
+ next_decoder_cache += (layer_outputs[-1],)
534
+ if output_attentions:
535
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
536
+ if self.config.add_cross_attention:
537
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
538
+
539
+ if output_hidden_states:
540
+ all_hidden_states = all_hidden_states + (hidden_states,)
541
+
542
+ if not return_dict:
543
+ return tuple(
544
+ v
545
+ for v in [
546
+ hidden_states,
547
+ next_decoder_cache,
548
+ all_hidden_states,
549
+ all_self_attentions,
550
+ all_cross_attentions,
551
+ ]
552
+ if v is not None
553
+ )
554
+ return BaseModelOutputWithPastAndCrossAttentions(
555
+ last_hidden_state=hidden_states,
556
+ past_key_values=next_decoder_cache,
557
+ hidden_states=all_hidden_states,
558
+ attentions=all_self_attentions,
559
+ cross_attentions=all_cross_attentions,
560
+ )
561
+
562
+
563
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
564
+ class Data2VecTextPooler(nn.Module):
565
+ def __init__(self, config):
566
+ super().__init__()
567
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
568
+ self.activation = nn.Tanh()
569
+
570
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
571
+ # We "pool" the model by simply taking the hidden state corresponding
572
+ # to the first token.
573
+ first_token_tensor = hidden_states[:, 0]
574
+ pooled_output = self.dense(first_token_tensor)
575
+ pooled_output = self.activation(pooled_output)
576
+ return pooled_output
577
+
578
+
579
+ class Data2VecTextPreTrainedModel(PreTrainedModel):
580
+ """
581
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
582
+ models.
583
+ """
584
+
585
+ config_class = Data2VecTextConfig
586
+ base_model_prefix = "data2vec_text"
587
+ supports_gradient_checkpointing = True
588
+ _no_split_modules = ["Data2VecTextForTextEmbeddings", "Data2VecTextLayer"]
589
+
590
+ def _init_weights(self, module):
591
+ """Initialize the weights"""
592
+ if isinstance(module, nn.Linear):
593
+ # Slightly different from the TF version which uses truncated_normal for initialization
594
+ # cf https://github.com/pytorch/pytorch/pull/5617
595
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
596
+ if module.bias is not None:
597
+ module.bias.data.zero_()
598
+ elif isinstance(module, nn.Embedding):
599
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
600
+ if module.padding_idx is not None:
601
+ module.weight.data[module.padding_idx].zero_()
602
+ elif isinstance(module, nn.LayerNorm):
603
+ if hasattr(module, "bias") and module.bias is not None:
604
+ module.bias.data.zero_()
605
+ if hasattr(module, "weight") and module.weight is not None:
606
+ module.weight.data.fill_(1.0)
607
+
608
+
609
+ DATA2VECTEXT_START_DOCSTRING = r"""
610
+ Data2VecText was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and
611
+ Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and
612
+ Michael Auli.
613
+
614
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
615
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
616
+ etc.)
617
+
618
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
619
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
620
+ and behavior.
621
+
622
+ Parameters:
623
+ config ([`Data2VecTextConfig`]): Model configuration class with all the parameters of the
624
+ model. Initializing with a config file does not load the weights associated with the model, only the
625
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
626
+ """
627
+
628
+ DATA2VECTEXT_INPUTS_DOCSTRING = r"""
629
+ Args:
630
+ input_ids (`torch.LongTensor` of shape `({0})`):
631
+ Indices of input sequence tokens in the vocabulary.
632
+
633
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
634
+ [`PreTrainedTokenizer.__call__`] for details.
635
+
636
+ [What are input IDs?](../glossary#input-ids)
637
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
638
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
639
+
640
+ - 1 for tokens that are **not masked**,
641
+ - 0 for tokens that are **masked**.
642
+
643
+ [What are attention masks?](../glossary#attention-mask)
644
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
645
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
646
+ 1]`:
647
+
648
+ - 0 corresponds to a *sentence A* token,
649
+ - 1 corresponds to a *sentence B* token.
650
+
651
+ [What are token type IDs?](../glossary#token-type-ids)
652
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
653
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
654
+ config.max_position_embeddings - 1]`.
655
+
656
+ [What are position IDs?](../glossary#position-ids)
657
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
658
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
659
+
660
+ - 1 indicates the head is **not masked**,
661
+ - 0 indicates the head is **masked**.
662
+
663
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
664
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
665
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
666
+ model's internal embedding lookup matrix.
667
+ output_attentions (`bool`, *optional*):
668
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
669
+ tensors for more detail.
670
+ output_hidden_states (`bool`, *optional*):
671
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
672
+ more detail.
673
+ return_dict (`bool`, *optional*):
674
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
675
+ """
676
+
677
+
678
+ @add_start_docstrings(
679
+ "The bare Data2VecText Model for text transformer outputting raw hidden-states without any specific head on top.",
680
+ DATA2VECTEXT_START_DOCSTRING,
681
+ )
682
+ class Data2VecTextModel(Data2VecTextPreTrainedModel):
683
+ """
684
+
685
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
686
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
687
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
688
+ Kaiser and Illia Polosukhin.
689
+
690
+ To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
691
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
692
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
693
+
694
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
695
+
696
+ """
697
+
698
+ def __init__(self, config, add_pooling_layer=True):
699
+ super().__init__(config)
700
+ self.config = config
701
+
702
+ self.embeddings = Data2VecTextForTextEmbeddings(config)
703
+ self.encoder = Data2VecTextEncoder(config)
704
+
705
+ self.pooler = Data2VecTextPooler(config) if add_pooling_layer else None
706
+
707
+ # Initialize weights and apply final processing
708
+ self.post_init()
709
+
710
+ def get_input_embeddings(self):
711
+ return self.embeddings.word_embeddings
712
+
713
+ def set_input_embeddings(self, value):
714
+ self.embeddings.word_embeddings = value
715
+
716
+ def _prune_heads(self, heads_to_prune):
717
+ """
718
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
719
+ class PreTrainedModel
720
+ """
721
+ for layer, heads in heads_to_prune.items():
722
+ self.encoder.layer[layer].attention.prune_heads(heads)
723
+
724
+ @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
725
+ @add_code_sample_docstrings(
726
+ checkpoint=_CHECKPOINT_FOR_DOC,
727
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
728
+ config_class=_CONFIG_FOR_DOC,
729
+ )
730
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
731
+ def forward(
732
+ self,
733
+ input_ids: Optional[torch.Tensor] = None,
734
+ attention_mask: Optional[torch.Tensor] = None,
735
+ token_type_ids: Optional[torch.Tensor] = None,
736
+ position_ids: Optional[torch.Tensor] = None,
737
+ head_mask: Optional[torch.Tensor] = None,
738
+ inputs_embeds: Optional[torch.Tensor] = None,
739
+ encoder_hidden_states: Optional[torch.Tensor] = None,
740
+ encoder_attention_mask: Optional[torch.Tensor] = None,
741
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
742
+ use_cache: Optional[bool] = None,
743
+ output_attentions: Optional[bool] = None,
744
+ output_hidden_states: Optional[bool] = None,
745
+ return_dict: Optional[bool] = None,
746
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
747
+ r"""
748
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
749
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
750
+ the model is configured as a decoder.
751
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
752
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
753
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
754
+
755
+ - 1 for tokens that are **not masked**,
756
+ - 0 for tokens that are **masked**.
757
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
758
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
759
+
760
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
761
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
762
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
763
+ use_cache (`bool`, *optional*):
764
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
765
+ `past_key_values`).
766
+ """
767
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
768
+ output_hidden_states = (
769
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
770
+ )
771
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
772
+
773
+ if self.config.is_decoder:
774
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
775
+ else:
776
+ use_cache = False
777
+
778
+ if input_ids is not None and inputs_embeds is not None:
779
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
780
+ elif input_ids is not None:
781
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
782
+ input_shape = input_ids.size()
783
+ elif inputs_embeds is not None:
784
+ input_shape = inputs_embeds.size()[:-1]
785
+ else:
786
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
787
+
788
+ batch_size, seq_length = input_shape
789
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
790
+
791
+ # past_key_values_length
792
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
793
+
794
+ if attention_mask is None:
795
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
796
+
797
+ if token_type_ids is None:
798
+ if hasattr(self.embeddings, "token_type_ids"):
799
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
800
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
801
+ token_type_ids = buffered_token_type_ids_expanded
802
+ else:
803
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
804
+
805
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
806
+ # ourselves in which case we just need to make it broadcastable to all heads.
807
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
808
+
809
+ # If a 2D or 3D attention mask is provided for the cross-attention
810
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
811
+ if self.config.is_decoder and encoder_hidden_states is not None:
812
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
813
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
814
+ if encoder_attention_mask is None:
815
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
816
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
817
+ else:
818
+ encoder_extended_attention_mask = None
819
+
820
+ # Prepare head mask if needed
821
+ # 1.0 in head_mask indicate we keep the head
822
+ # attention_probs has shape bsz x n_heads x N x N
823
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
824
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
825
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
826
+
827
+ embedding_output = self.embeddings(
828
+ input_ids=input_ids,
829
+ position_ids=position_ids,
830
+ token_type_ids=token_type_ids,
831
+ inputs_embeds=inputs_embeds,
832
+ past_key_values_length=past_key_values_length,
833
+ )
834
+ encoder_outputs = self.encoder(
835
+ embedding_output,
836
+ attention_mask=extended_attention_mask,
837
+ head_mask=head_mask,
838
+ encoder_hidden_states=encoder_hidden_states,
839
+ encoder_attention_mask=encoder_extended_attention_mask,
840
+ past_key_values=past_key_values,
841
+ use_cache=use_cache,
842
+ output_attentions=output_attentions,
843
+ output_hidden_states=output_hidden_states,
844
+ return_dict=return_dict,
845
+ )
846
+ sequence_output = encoder_outputs[0]
847
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
848
+
849
+ if not return_dict:
850
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
851
+
852
+ return BaseModelOutputWithPoolingAndCrossAttentions(
853
+ last_hidden_state=sequence_output,
854
+ pooler_output=pooled_output,
855
+ past_key_values=encoder_outputs.past_key_values,
856
+ hidden_states=encoder_outputs.hidden_states,
857
+ attentions=encoder_outputs.attentions,
858
+ cross_attentions=encoder_outputs.cross_attentions,
859
+ )
860
+
861
+
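For reference, a minimal encoding pass through this model, assuming the `facebook/data2vec-text-base` checkpoint referenced by `_CHECKPOINT_FOR_DOC` is available:

```python
import torch
from transformers import AutoTokenizer, Data2VecTextModel

tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One hidden vector per input token, plus the tanh-pooled first-token vector.
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```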
862
+ @add_start_docstrings(
863
+ """Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.""", DATA2VECTEXT_START_DOCSTRING
864
+ )
865
+ class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel):
866
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
867
+
868
+ def __init__(self, config):
869
+ super().__init__(config)
870
+
871
+ if not config.is_decoder:
872
+ logger.warning("If you want to use `Data2VecTextLMHeadModel` as a standalone, add `is_decoder=True.`")
873
+
874
+ self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
875
+ self.lm_head = Data2VecTextLMHead(config)
876
+
877
+ # Initialize weights and apply final processing
878
+ self.post_init()
879
+
880
+ def get_output_embeddings(self):
881
+ return self.lm_head.decoder
882
+
883
+ def set_output_embeddings(self, new_embeddings):
884
+ self.lm_head.decoder = new_embeddings
885
+
886
+ @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
887
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
888
+ def forward(
889
+ self,
890
+ input_ids: Optional[torch.LongTensor] = None,
891
+ attention_mask: Optional[torch.FloatTensor] = None,
892
+ token_type_ids: Optional[torch.LongTensor] = None,
893
+ position_ids: Optional[torch.LongTensor] = None,
894
+ head_mask: Optional[torch.FloatTensor] = None,
895
+ inputs_embeds: Optional[torch.FloatTensor] = None,
896
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
897
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
898
+ labels: Optional[torch.LongTensor] = None,
899
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
900
+ use_cache: Optional[bool] = None,
901
+ output_attentions: Optional[bool] = None,
902
+ output_hidden_states: Optional[bool] = None,
903
+ return_dict: Optional[bool] = None,
904
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
905
+ r"""
906
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
907
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
908
+ the model is configured as a decoder.
909
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
910
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
911
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
912
+
913
+ - 1 for tokens that are **not masked**,
914
+ - 0 for tokens that are **masked**.
915
+
916
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
917
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
918
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
919
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
920
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
921
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
922
+
923
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
924
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
925
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
926
+ use_cache (`bool`, *optional*):
927
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
928
+ `past_key_values`).
929
+
930
+ Returns:
931
+
932
+ Example:
933
+
934
+ ```python
935
+ >>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig
936
+ >>> import torch
937
+
938
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
939
+ >>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base")
940
+ >>> config.is_decoder = True
941
+ >>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config)
942
+
943
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
944
+ >>> outputs = model(**inputs)
945
+
946
+ >>> prediction_logits = outputs.logits
947
+ ```"""
948
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
949
+ if labels is not None:
950
+ use_cache = False
951
+
952
+ outputs = self.data2vec_text(
953
+ input_ids,
954
+ attention_mask=attention_mask,
955
+ token_type_ids=token_type_ids,
956
+ position_ids=position_ids,
957
+ head_mask=head_mask,
958
+ inputs_embeds=inputs_embeds,
959
+ encoder_hidden_states=encoder_hidden_states,
960
+ encoder_attention_mask=encoder_attention_mask,
961
+ past_key_values=past_key_values,
962
+ use_cache=use_cache,
963
+ output_attentions=output_attentions,
964
+ output_hidden_states=output_hidden_states,
965
+ return_dict=return_dict,
966
+ )
967
+
968
+ sequence_output = outputs[0]
969
+ prediction_scores = self.lm_head(sequence_output)
970
+
971
+ lm_loss = None
972
+ if labels is not None:
973
+ # we are doing next-token prediction; shift prediction scores and input ids by one
974
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
975
+ labels = labels[:, 1:].contiguous()
976
+ loss_fct = CrossEntropyLoss()
977
+
978
+ labels = labels.to(shifted_prediction_scores.device)
979
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
980
+
981
+ if not return_dict:
982
+ output = (prediction_scores,) + outputs[2:]
983
+ return ((lm_loss,) + output) if lm_loss is not None else output
984
+
985
+ return CausalLMOutputWithCrossAttentions(
986
+ loss=lm_loss,
987
+ logits=prediction_scores,
988
+ past_key_values=outputs.past_key_values,
989
+ hidden_states=outputs.hidden_states,
990
+ attentions=outputs.attentions,
991
+ cross_attentions=outputs.cross_attentions,
992
+ )
993
+
994
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
995
+ input_shape = input_ids.shape
996
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
997
+ if attention_mask is None:
998
+ attention_mask = input_ids.new_ones(input_shape)
999
+
1000
+ # cut decoder_input_ids if past_key_values is used
1001
+ if past_key_values is not None:
1002
+ past_length = past_key_values[0][0].shape[2]
1003
+
1004
+ # Some generation methods already pass only the last input ID
1005
+ if input_ids.shape[1] > past_length:
1006
+ remove_prefix_length = past_length
1007
+ else:
1008
+ # Default to old behavior: keep only final ID
1009
+ remove_prefix_length = input_ids.shape[1] - 1
1010
+
1011
+ input_ids = input_ids[:, remove_prefix_length:]
1012
+
1013
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1014
+
1015
+ def _reorder_cache(self, past_key_values, beam_idx):
1016
+ reordered_past = ()
1017
+ for layer_past in past_key_values:
1018
+ reordered_past += (
1019
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1020
+ )
1021
+ return reordered_past
1022
+
1023
+
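The causal-LM loss above shifts scores and labels by one position so that step *t* is trained to predict token *t + 1*. A tiny numeric sketch of that alignment (vocabulary size and token ids are made up):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(1, 4, vocab_size)   # logits for a 4-token sequence
labels = torch.tensor([[3, 5, 7, 2]])                # the same 4 input token ids

# Position t is scored against the token at position t + 1, so drop the last score
# and the first label before flattening for the cross-entropy.
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()
loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(loss.item())
```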
1024
+ @add_start_docstrings("""data2vec Model with a `language modeling` head on top.""", DATA2VECTEXT_START_DOCSTRING)
1025
+ class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel):
1026
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
1027
+
1028
+ def __init__(self, config):
1029
+ super().__init__(config)
1030
+
1031
+ if config.is_decoder:
1032
+ logger.warning(
1033
+ "If you want to use `Data2VecTextForMaskedLM` make sure `config.is_decoder=False` for "
1034
+ "bi-directional self-attention."
1035
+ )
1036
+
1037
+ self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
1038
+ self.lm_head = Data2VecTextLMHead(config)
1039
+
1040
+ # Initialize weights and apply final processing
1041
+ self.post_init()
1042
+
1043
+ def get_output_embeddings(self):
1044
+ return self.lm_head.decoder
1045
+
1046
+ def set_output_embeddings(self, new_embeddings):
1047
+ self.lm_head.decoder = new_embeddings
1048
+
1049
+ @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1050
+ @add_code_sample_docstrings(
1051
+ checkpoint=_CHECKPOINT_FOR_DOC,
1052
+ output_type=MaskedLMOutput,
1053
+ config_class=_CONFIG_FOR_DOC,
1054
+ mask="<mask>",
1055
+ )
1056
+ def forward(
1057
+ self,
1058
+ input_ids: Optional[torch.LongTensor] = None,
1059
+ attention_mask: Optional[torch.FloatTensor] = None,
1060
+ token_type_ids: Optional[torch.LongTensor] = None,
1061
+ position_ids: Optional[torch.LongTensor] = None,
1062
+ head_mask: Optional[torch.FloatTensor] = None,
1063
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1064
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1065
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1066
+ labels: Optional[torch.LongTensor] = None,
1067
+ output_attentions: Optional[bool] = None,
1068
+ output_hidden_states: Optional[bool] = None,
1069
+ return_dict: Optional[bool] = None,
1070
+ ) -> Union[Tuple, MaskedLMOutput]:
1071
+ r"""
1072
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1073
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1074
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1075
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1076
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1077
+ Used to hide legacy arguments that have been deprecated.
1078
+ """
1079
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1080
+
1081
+ outputs = self.data2vec_text(
1082
+ input_ids,
1083
+ attention_mask=attention_mask,
1084
+ token_type_ids=token_type_ids,
1085
+ position_ids=position_ids,
1086
+ head_mask=head_mask,
1087
+ inputs_embeds=inputs_embeds,
1088
+ encoder_hidden_states=encoder_hidden_states,
1089
+ encoder_attention_mask=encoder_attention_mask,
1090
+ output_attentions=output_attentions,
1091
+ output_hidden_states=output_hidden_states,
1092
+ return_dict=return_dict,
1093
+ )
1094
+ sequence_output = outputs[0]
1095
+ prediction_scores = self.lm_head(sequence_output)
1096
+
1097
+ masked_lm_loss = None
1098
+ if labels is not None:
1099
+ loss_fct = CrossEntropyLoss()
1100
+
1101
+ labels = labels.to(prediction_scores.device)
1102
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1103
+
1104
+ if not return_dict:
1105
+ output = (prediction_scores,) + outputs[2:]
1106
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1107
+
1108
+ return MaskedLMOutput(
1109
+ loss=masked_lm_loss,
1110
+ logits=prediction_scores,
1111
+ hidden_states=outputs.hidden_states,
1112
+ attentions=outputs.attentions,
1113
+ )
1114
+
1115
+
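A short fill-mask sketch for the class above. Whether the pretrained LM head of `facebook/data2vec-text-base` yields a sensible completion is checkpoint-dependent, so treat this purely as an API illustration:

```python
import torch
from transformers import AutoTokenizer, Data2VecTextForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the most likely token at the <mask> position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```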
1116
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Data2VecText
1117
+ class Data2VecTextLMHead(nn.Module):
1118
+ """Data2VecText Head for masked language modeling."""
1119
+
1120
+ def __init__(self, config):
1121
+ super().__init__()
1122
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1123
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1124
+
1125
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1126
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1127
+ self.decoder.bias = self.bias
1128
+
1129
+ def forward(self, features, **kwargs):
1130
+ x = self.dense(features)
1131
+ x = gelu(x)
1132
+ x = self.layer_norm(x)
1133
+
1134
+ # project back to size of vocabulary with bias
1135
+ x = self.decoder(x)
1136
+
1137
+ return x
1138
+
1139
+ def _tie_weights(self):
1140
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1141
+ # For accelerate compatibility and to not break backward compatibility
1142
+ if self.decoder.bias.device.type == "meta":
1143
+ self.decoder.bias = self.bias
1144
+ else:
1145
+ self.bias = self.decoder.bias
1146
+
1147
+
1148
+ @add_start_docstrings(
1149
+ """
1150
+ Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1151
+ pooled output) e.g. for GLUE tasks.
1152
+ """,
1153
+ DATA2VECTEXT_START_DOCSTRING,
1154
+ )
1155
+ class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel):
1156
+ def __init__(self, config):
1157
+ super().__init__(config)
1158
+ self.num_labels = config.num_labels
1159
+ self.config = config
1160
+
1161
+ self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
1162
+ self.classifier = Data2VecTextClassificationHead(config)
1163
+
1164
+ # Initialize weights and apply final processing
1165
+ self.post_init()
1166
+
1167
+ @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1168
+ @add_code_sample_docstrings(
1169
+ checkpoint=_CHECKPOINT_FOR_DOC,
1170
+ output_type=SequenceClassifierOutput,
1171
+ config_class=_CONFIG_FOR_DOC,
1172
+ )
1173
+ def forward(
1174
+ self,
1175
+ input_ids: Optional[torch.LongTensor] = None,
1176
+ attention_mask: Optional[torch.FloatTensor] = None,
1177
+ token_type_ids: Optional[torch.LongTensor] = None,
1178
+ position_ids: Optional[torch.LongTensor] = None,
1179
+ head_mask: Optional[torch.FloatTensor] = None,
1180
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1181
+ labels: Optional[torch.LongTensor] = None,
1182
+ output_attentions: Optional[bool] = None,
1183
+ output_hidden_states: Optional[bool] = None,
1184
+ return_dict: Optional[bool] = None,
1185
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1186
+ r"""
1187
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1188
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1189
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1190
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1191
+ """
1192
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1193
+
1194
+ outputs = self.data2vec_text(
1195
+ input_ids,
1196
+ attention_mask=attention_mask,
1197
+ token_type_ids=token_type_ids,
1198
+ position_ids=position_ids,
1199
+ head_mask=head_mask,
1200
+ inputs_embeds=inputs_embeds,
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ )
1205
+ sequence_output = outputs[0]
1206
+ logits = self.classifier(sequence_output)
1207
+
1208
+ loss = None
1209
+ if labels is not None:
1210
+ labels = labels.to(logits.device)
1211
+
1212
+ if self.config.problem_type is None:
1213
+ if self.num_labels == 1:
1214
+ self.config.problem_type = "regression"
1215
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1216
+ self.config.problem_type = "single_label_classification"
1217
+ else:
1218
+ self.config.problem_type = "multi_label_classification"
1219
+
1220
+ if self.config.problem_type == "regression":
1221
+ loss_fct = MSELoss()
1222
+ if self.num_labels == 1:
1223
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1224
+ else:
1225
+ loss = loss_fct(logits, labels)
1226
+ elif self.config.problem_type == "single_label_classification":
1227
+ loss_fct = CrossEntropyLoss()
1228
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1229
+ elif self.config.problem_type == "multi_label_classification":
1230
+ loss_fct = BCEWithLogitsLoss()
1231
+ loss = loss_fct(logits, labels)
1232
+
1233
+ if not return_dict:
1234
+ output = (logits,) + outputs[2:]
1235
+ return ((loss,) + output) if loss is not None else output
1236
+
1237
+ return SequenceClassifierOutput(
1238
+ loss=loss,
1239
+ logits=logits,
1240
+ hidden_states=outputs.hidden_states,
1241
+ attentions=outputs.attentions,
1242
+ )
1243
+
1244
+
1245
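Editorial note (not part of the diffed file): the forward above chooses the loss from `config.problem_type`, inferring it from `num_labels` and the label dtype when unset. A standalone sketch of that dispatch with plain tensors (sizes are assumptions):

```python
# Standalone sketch of the problem_type dispatch used in the sequence classification head.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

num_labels = 3
logits = torch.randn(4, num_labels)

# single_label_classification: integer class ids -> cross-entropy
class_ids = torch.tensor([0, 2, 1, 2])
print(CrossEntropyLoss()(logits.view(-1, num_labels), class_ids.view(-1)))

# multi_label_classification: float multi-hot targets -> BCE with logits
multi_hot = torch.tensor([[1.0, 0.0, 1.0]] * 4)
print(BCEWithLogitsLoss()(logits, multi_hot))

# regression (num_labels == 1): float targets -> MSE
reg_logits = torch.randn(4, 1)
targets = torch.randn(4)
print(MSELoss()(reg_logits.squeeze(), targets.squeeze()))
```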
+ @add_start_docstrings(
1246
+ """
1247
+ Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output
1248
+ and a softmax) e.g. for RocStories/SWAG tasks.
1249
+ """,
1250
+ DATA2VECTEXT_START_DOCSTRING,
1251
+ )
1252
+ class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel):
1253
+ def __init__(self, config):
1254
+ super().__init__(config)
1255
+
1256
+ self.data2vec_text = Data2VecTextModel(config)
1257
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1258
+ self.classifier = nn.Linear(config.hidden_size, 1)
1259
+
1260
+ # Initialize weights and apply final processing
1261
+ self.post_init()
1262
+
1263
+ @add_start_docstrings_to_model_forward(
1264
+ DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1265
+ )
1266
+ @add_code_sample_docstrings(
1267
+ checkpoint=_CHECKPOINT_FOR_DOC,
1268
+ output_type=MultipleChoiceModelOutput,
1269
+ config_class=_CONFIG_FOR_DOC,
1270
+ )
1271
+ def forward(
1272
+ self,
1273
+ input_ids: Optional[torch.LongTensor] = None,
1274
+ token_type_ids: Optional[torch.LongTensor] = None,
1275
+ attention_mask: Optional[torch.FloatTensor] = None,
1276
+ labels: Optional[torch.LongTensor] = None,
1277
+ position_ids: Optional[torch.LongTensor] = None,
1278
+ head_mask: Optional[torch.FloatTensor] = None,
1279
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1280
+ output_attentions: Optional[bool] = None,
1281
+ output_hidden_states: Optional[bool] = None,
1282
+ return_dict: Optional[bool] = None,
1283
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1284
+ r"""
1285
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1286
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1287
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1288
+ `input_ids` above)
1289
+ """
1290
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1291
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1292
+
1293
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1294
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1295
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1296
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1297
+ flat_inputs_embeds = (
1298
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1299
+ if inputs_embeds is not None
1300
+ else None
1301
+ )
1302
+
1303
+ outputs = self.data2vec_text(
1304
+ flat_input_ids,
1305
+ position_ids=flat_position_ids,
1306
+ token_type_ids=flat_token_type_ids,
1307
+ attention_mask=flat_attention_mask,
1308
+ head_mask=head_mask,
1309
+ inputs_embeds=flat_inputs_embeds,
1310
+ output_attentions=output_attentions,
1311
+ output_hidden_states=output_hidden_states,
1312
+ return_dict=return_dict,
1313
+ )
1314
+ pooled_output = outputs[1]
1315
+
1316
+ pooled_output = self.dropout(pooled_output)
1317
+ logits = self.classifier(pooled_output)
1318
+ reshaped_logits = logits.view(-1, num_choices)
1319
+
1320
+ loss = None
1321
+ if labels is not None:
1322
+ loss_fct = CrossEntropyLoss()
1323
+
1324
+ labels = labels.to(reshaped_logits.device)
1325
+ loss = loss_fct(reshaped_logits, labels)
1326
+
1327
+ if not return_dict:
1328
+ output = (reshaped_logits,) + outputs[2:]
1329
+ return ((loss,) + output) if loss is not None else output
1330
+
1331
+ return MultipleChoiceModelOutput(
1332
+ loss=loss,
1333
+ logits=reshaped_logits,
1334
+ hidden_states=outputs.hidden_states,
1335
+ attentions=outputs.attentions,
1336
+ )
1337
+
1338
+
1339
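Editorial note (not part of the diffed file): the multiple-choice head flattens `(batch, num_choices, seq_len)` inputs into `(batch * num_choices, seq_len)`, scores each choice with a single-logit classifier, then reshapes back to `(batch, num_choices)`. A shape-only sketch with made-up sizes, where a random tensor stands in for the model's pooled output:

```python
# Shape-only sketch of the flatten / score / reshape pattern above (all sizes assumed).
import torch
from torch import nn

batch_size, num_choices, seq_len, hidden_size = 2, 4, 16, 768
input_ids = torch.randint(0, 1000, (batch_size, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))            # (8, 16)
pooled_output = torch.randn(flat_input_ids.size(0), hidden_size)   # stand-in for the pooled model output
classifier = nn.Linear(hidden_size, 1)

logits = classifier(pooled_output)                                  # (8, 1)
reshaped_logits = logits.view(-1, num_choices)                      # (2, 4): one score per choice
labels = torch.tensor([1, 3])                                       # index of the correct choice per example
loss = nn.CrossEntropyLoss()(reshaped_logits, labels)
print(reshaped_logits.shape, loss.item())
```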
+ @add_start_docstrings(
1340
+ """
1341
+ Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1342
+ for Named-Entity-Recognition (NER) tasks.
1343
+ """,
1344
+ DATA2VECTEXT_START_DOCSTRING,
1345
+ )
1346
+ class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel):
1347
+ def __init__(self, config):
1348
+ super().__init__(config)
1349
+ self.num_labels = config.num_labels
1350
+
1351
+ self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
1352
+ classifier_dropout = (
1353
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1354
+ )
1355
+ self.dropout = nn.Dropout(classifier_dropout)
1356
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1357
+
1358
+ # Initialize weights and apply final processing
1359
+ self.post_init()
1360
+
1361
+ @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1362
+ @add_code_sample_docstrings(
1363
+ checkpoint=_CHECKPOINT_FOR_DOC,
1364
+ output_type=TokenClassifierOutput,
1365
+ config_class=_CONFIG_FOR_DOC,
1366
+ )
1367
+ def forward(
1368
+ self,
1369
+ input_ids: Optional[torch.LongTensor] = None,
1370
+ attention_mask: Optional[torch.FloatTensor] = None,
1371
+ token_type_ids: Optional[torch.LongTensor] = None,
1372
+ position_ids: Optional[torch.LongTensor] = None,
1373
+ head_mask: Optional[torch.FloatTensor] = None,
1374
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1375
+ labels: Optional[torch.LongTensor] = None,
1376
+ output_attentions: Optional[bool] = None,
1377
+ output_hidden_states: Optional[bool] = None,
1378
+ return_dict: Optional[bool] = None,
1379
+ ) -> Union[Tuple, TokenClassifierOutput]:
1380
+ r"""
1381
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1382
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1383
+ """
1384
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1385
+
1386
+ outputs = self.data2vec_text(
1387
+ input_ids,
1388
+ attention_mask=attention_mask,
1389
+ token_type_ids=token_type_ids,
1390
+ position_ids=position_ids,
1391
+ head_mask=head_mask,
1392
+ inputs_embeds=inputs_embeds,
1393
+ output_attentions=output_attentions,
1394
+ output_hidden_states=output_hidden_states,
1395
+ return_dict=return_dict,
1396
+ )
1397
+
1398
+ sequence_output = outputs[0]
1399
+
1400
+ sequence_output = self.dropout(sequence_output)
1401
+ logits = self.classifier(sequence_output)
1402
+
1403
+ loss = None
1404
+ if labels is not None:
1405
+ loss_fct = CrossEntropyLoss()
1406
+
1407
+ labels = labels.to(logits.device)
1408
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1409
+
1410
+ if not return_dict:
1411
+ output = (logits,) + outputs[2:]
1412
+ return ((loss,) + output) if loss is not None else output
1413
+
1414
+ return TokenClassifierOutput(
1415
+ loss=loss,
1416
+ logits=logits,
1417
+ hidden_states=outputs.hidden_states,
1418
+ attentions=outputs.attentions,
1419
+ )
1420
+
1421
+
1422
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Data2VecText
1423
+ class Data2VecTextClassificationHead(nn.Module):
1424
+ """Head for sentence-level classification tasks."""
1425
+
1426
+ def __init__(self, config):
1427
+ super().__init__()
1428
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1429
+ classifier_dropout = (
1430
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1431
+ )
1432
+ self.dropout = nn.Dropout(classifier_dropout)
1433
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1434
+
1435
+ def forward(self, features, **kwargs):
1436
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1437
+ x = self.dropout(x)
1438
+ x = self.dense(x)
1439
+ x = torch.tanh(x)
1440
+ x = self.dropout(x)
1441
+ x = self.out_proj(x)
1442
+ return x
1443
+
1444
+
1445
+ @add_start_docstrings(
1446
+ """
1447
+ Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1448
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1449
+ """,
1450
+ DATA2VECTEXT_START_DOCSTRING,
1451
+ )
1452
+ class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel):
1453
+ def __init__(self, config):
1454
+ super().__init__(config)
1455
+ self.num_labels = config.num_labels
1456
+
1457
+ self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
1458
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1459
+
1460
+ # Initialize weights and apply final processing
1461
+ self.post_init()
1462
+
1463
+ @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1464
+ @add_code_sample_docstrings(
1465
+ checkpoint=_CHECKPOINT_FOR_DOC,
1466
+ output_type=QuestionAnsweringModelOutput,
1467
+ config_class=_CONFIG_FOR_DOC,
1468
+ )
1469
+ def forward(
1470
+ self,
1471
+ input_ids: Optional[torch.LongTensor] = None,
1472
+ attention_mask: Optional[torch.FloatTensor] = None,
1473
+ token_type_ids: Optional[torch.LongTensor] = None,
1474
+ position_ids: Optional[torch.LongTensor] = None,
1475
+ head_mask: Optional[torch.FloatTensor] = None,
1476
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1477
+ start_positions: Optional[torch.LongTensor] = None,
1478
+ end_positions: Optional[torch.LongTensor] = None,
1479
+ output_attentions: Optional[bool] = None,
1480
+ output_hidden_states: Optional[bool] = None,
1481
+ return_dict: Optional[bool] = None,
1482
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1483
+ r"""
1484
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1485
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1486
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1487
+ are not taken into account for computing the loss.
1488
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1489
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1490
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1491
+ are not taken into account for computing the loss.
1492
+ """
1493
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1494
+
1495
+ outputs = self.data2vec_text(
1496
+ input_ids,
1497
+ attention_mask=attention_mask,
1498
+ token_type_ids=token_type_ids,
1499
+ position_ids=position_ids,
1500
+ head_mask=head_mask,
1501
+ inputs_embeds=inputs_embeds,
1502
+ output_attentions=output_attentions,
1503
+ output_hidden_states=output_hidden_states,
1504
+ return_dict=return_dict,
1505
+ )
1506
+
1507
+ sequence_output = outputs[0]
1508
+
1509
+ logits = self.qa_outputs(sequence_output)
1510
+ start_logits, end_logits = logits.split(1, dim=-1)
1511
+ start_logits = start_logits.squeeze(-1).contiguous()
1512
+ end_logits = end_logits.squeeze(-1).contiguous()
1513
+
1514
+ total_loss = None
1515
+ if start_positions is not None and end_positions is not None:
1516
+ # If we are on multi-GPU, splitting adds an extra dimension, so squeeze it
1517
+ if len(start_positions.size()) > 1:
1518
+ start_positions = start_positions.squeeze(-1)
1519
+ if len(end_positions.size()) > 1:
1520
+ end_positions = end_positions.squeeze(-1)
1521
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1522
+ ignored_index = start_logits.size(1)
1523
+ start_positions = start_positions.clamp(0, ignored_index)
1524
+ end_positions = end_positions.clamp(0, ignored_index)
1525
+
1526
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1527
+ start_loss = loss_fct(start_logits, start_positions)
1528
+ end_loss = loss_fct(end_logits, end_positions)
1529
+ total_loss = (start_loss + end_loss) / 2
1530
+
1531
+ if not return_dict:
1532
+ output = (start_logits, end_logits) + outputs[2:]
1533
+ return ((total_loss,) + output) if total_loss is not None else output
1534
+
1535
+ return QuestionAnsweringModelOutput(
1536
+ loss=total_loss,
1537
+ start_logits=start_logits,
1538
+ end_logits=end_logits,
1539
+ hidden_states=outputs.hidden_states,
1540
+ attentions=outputs.attentions,
1541
+ )
1542
+
1543
+
1544
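Editorial note (not part of the diffed file): the QA head projects each token to two logits that are split into start and end scores; at inference the predicted span is usually the argmax of each. A sketch where random numbers stand in for the model's logits (sizes assumed):

```python
# Sketch of how the start/end logits from qa_outputs are split and turned into a span.
import torch

batch_size, seq_len = 1, 12
logits = torch.randn(batch_size, seq_len, 2)          # qa_outputs produces 2 logits per token

start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()  # (1, 12)
end_logits = end_logits.squeeze(-1).contiguous()      # (1, 12)

start_index = int(start_logits.argmax(dim=-1))
end_index = int(end_logits.argmax(dim=-1))
print(start_index, end_index)  # predicted span token indices; real code should enforce start <= end
```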
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1545
+ """
1546
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1547
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1548
+
1549
+ Args:
1550
+ input_ids (torch.Tensor): Input ids, where padding positions are equal to `padding_idx`.
1551
+
1552
+ Returns: torch.Tensor of position ids with the same shape as `input_ids`.
1553
+ """
1554
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1555
+ mask = input_ids.ne(padding_idx).int()
1556
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1557
+ return incremental_indices.long() + padding_idx
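Editorial note (not part of the diffed file): a worked example of `create_position_ids_from_input_ids`. Padding tokens keep `padding_idx` as their position, and real tokens get consecutive positions starting at `padding_idx + 1`. The token ids below are made up apart from the padding id:

```python
# Worked example for create_position_ids_from_input_ids with padding_idx=1 and no past key values.
import torch

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])   # last two tokens are padding (id == padding_idx)
padding_idx = 1

mask = input_ids.ne(padding_idx).int()                  # tensor([[1, 1, 1, 1, 0, 0]])
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
position_ids = incremental_indices.long() + padding_idx
print(position_ids)                                     # tensor([[2, 3, 4, 5, 1, 1]])
```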
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_vision.py ADDED
@@ -0,0 +1,1228 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Data2VecVision model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPooling,
32
+ ImageClassifierOutput,
33
+ SemanticSegmenterOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
37
+ from ...utils import (
38
+ add_code_sample_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_data2vec_vision import Data2VecVisionConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ # General docstring
50
+ _CONFIG_FOR_DOC = "Data2VecVisionConfig"
51
+
52
+ # Base docstring
53
+ _CHECKPOINT_FOR_DOC = "facebook/data2vec-vision-base"
54
+ _EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
55
+
56
+ # Image classification docstring
57
+ _IMAGE_CLASS_CHECKPOINT = "facebook/data2vec-vision-base-ft1k"
58
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote"
59
+
60
+
61
+ from ..deprecated._archive_maps import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
62
+
63
+
64
+ @dataclass
65
+ # Copied from transformers.models.beit.modeling_beit.BeitModelOutputWithPooling with Beit->Data2VecVision
66
+ class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling):
67
+ """
68
+ Class for outputs of [`Data2VecVisionModel`].
69
+
70
+ Args:
71
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
72
+ Sequence of hidden-states at the output of the last layer of the model.
73
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
74
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
75
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
76
+ will be returned.
77
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
78
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
79
+ shape `(batch_size, sequence_length, hidden_size)`.
80
+
81
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
82
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
83
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
84
+ sequence_length)`.
85
+
86
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
87
+ heads.
88
+ """
89
+
90
+
91
+ # Copied from transformers.models.beit.modeling_beit.drop_path
92
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
93
+ """
94
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
95
+
96
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
97
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
98
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
99
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
100
+ argument.
101
+ """
102
+ if drop_prob == 0.0 or not training:
103
+ return input
104
+ keep_prob = 1 - drop_prob
105
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
106
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
107
+ random_tensor.floor_() # binarize
108
+ output = input.div(keep_prob) * random_tensor
109
+ return output
110
+
111
+
112
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Data2VecVision
113
+ class Data2VecVisionDropPath(nn.Module):
114
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
115
+
116
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
117
+ super().__init__()
118
+ self.drop_prob = drop_prob
119
+
120
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
121
+ return drop_path(hidden_states, self.drop_prob, self.training)
122
+
123
+ def extra_repr(self) -> str:
124
+ return "p={}".format(self.drop_prob)
125
+
126
+
127
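Editorial note (not part of the diffed file): `drop_path` zeroes an entire sample's residual branch with probability `drop_prob` and rescales the survivors by `1 / keep_prob`, so the expected value is unchanged; in eval mode it is the identity. A small sketch of that behaviour:

```python
# Sketch of stochastic depth: one binary mask value per sample, survivors rescaled by 1 / keep_prob.
import torch

torch.manual_seed(0)
x = torch.ones(4, 3)                                      # 4 samples in the batch
drop_prob = 0.5
keep_prob = 1 - drop_prob

shape = (x.shape[0],) + (1,) * (x.ndim - 1)               # one mask value per sample
random_tensor = (keep_prob + torch.rand(shape)).floor_()  # 0 or 1 per sample
out = x.div(keep_prob) * random_tensor
print(out)  # each row is either all 0.0 (dropped sample) or all 2.0 (kept and rescaled)
```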
+ # Copied from transformers.models.beit.modeling_beit.BeitEmbeddings with Beit->Data2VecVision
128
+ class Data2VecVisionEmbeddings(nn.Module):
129
+ """
130
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
131
+
132
+ """
133
+
134
+ def __init__(self, config: Data2VecVisionConfig) -> None:
135
+ super().__init__()
136
+
137
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
138
+ if config.use_mask_token:
139
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
140
+ else:
141
+ self.mask_token = None
142
+ self.patch_embeddings = Data2VecVisionPatchEmbeddings(config)
143
+ num_patches = self.patch_embeddings.num_patches
144
+ if config.use_absolute_position_embeddings:
145
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
146
+ else:
147
+ self.position_embeddings = None
148
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
149
+
150
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
151
+ embeddings, (patch_height, patch_width) = self.patch_embeddings(
152
+ pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None
153
+ )
154
+ batch_size, seq_len, _ = embeddings.size()
155
+
156
+ if bool_masked_pos is not None:
157
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
158
+ # replace the masked visual tokens by mask_tokens
159
+ w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
160
+ embeddings = embeddings * (1 - w) + mask_tokens * w
161
+
162
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
163
+ if self.position_embeddings is not None:
164
+ cls_tokens = cls_tokens + self.position_embeddings[:, :1, :]
165
+
166
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
167
+
168
+ embeddings = self.dropout(embeddings)
169
+
170
+ return embeddings, (patch_height, patch_width)
171
+
172
+
173
+ # Copied from transformers.models.beit.modeling_beit.BeitPatchEmbeddings with Beit->Data2VecVision
174
+ class Data2VecVisionPatchEmbeddings(nn.Module):
175
+ """
176
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
177
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
178
+ Transformer.
179
+ """
180
+
181
+ def __init__(self, config):
182
+ super().__init__()
183
+ image_size, patch_size = config.image_size, config.patch_size
184
+ num_channels, hidden_size = config.num_channels, config.hidden_size
185
+
186
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
187
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
188
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
189
+ patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
190
+ self.image_size = image_size
191
+ self.patch_size = patch_size
192
+ self.num_channels = num_channels
193
+ self.num_patches = num_patches
194
+ self.patch_shape = patch_shape
195
+
196
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
197
+
198
+ def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
199
+ batch_size, num_channels, height, width = pixel_values.shape
200
+ if num_channels != self.num_channels:
201
+ raise ValueError(
202
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
203
+ )
204
+
205
+ embeddings = self.projection(pixel_values)
206
+ patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
207
+
208
+ if position_embedding is not None:
209
+ # interpolate the position embedding to the corresponding size
210
+ position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(
211
+ 0, 3, 1, 2
212
+ )
213
+ position_embedding = nn.functional.interpolate(
214
+ position_embedding, size=(patch_height, patch_width), mode="bicubic"
215
+ )
216
+ embeddings = embeddings + position_embedding
217
+
218
+ embeddings = embeddings.flatten(2).transpose(1, 2)
219
+
220
+ return embeddings, (patch_height, patch_width)
221
+
222
+
223
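Editorial note (not part of the diffed file): the patch embedding cuts the image into non-overlapping patches with a strided `Conv2d`, so the sequence length is `(H / P) * (W / P)` patches plus one CLS token; with the usual 224x224 input and 16x16 patches that is 196 + 1 = 197, matching `_EXPECTED_OUTPUT_SHAPE` above. A quick check with assumed sizes:

```python
# Quick check of the patch arithmetic (assumes a 224x224 RGB input and 16x16 patches).
import torch
from torch import nn

image_size, patch_size, hidden_size = 224, 16, 768
projection = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(1, 3, image_size, image_size)
embeddings = projection(pixel_values)                 # (1, 768, 14, 14)
embeddings = embeddings.flatten(2).transpose(1, 2)    # (1, 196, 768)
print(embeddings.shape, "-> 196 patches + 1 CLS token = sequence length 197")
```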
+ # Copied from transformers.models.beit.modeling_beit.BeitSelfAttention with Beit->Data2VecVision
224
+ class Data2VecVisionSelfAttention(nn.Module):
225
+ def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
226
+ super().__init__()
227
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
228
+ raise ValueError(
229
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
230
+ f"heads {config.num_attention_heads}."
231
+ )
232
+
233
+ self.num_attention_heads = config.num_attention_heads
234
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
235
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
236
+
237
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
238
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
239
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
240
+
241
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
242
+
243
+ if window_size:
244
+ self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
245
+ else:
246
+ self.relative_position_bias = None
247
+
248
+ def transpose_for_scores(self, x):
249
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
250
+ x = x.view(*new_x_shape)
251
+ return x.permute(0, 2, 1, 3)
252
+
253
+ def forward(
254
+ self,
255
+ hidden_states: torch.Tensor,
256
+ head_mask: Optional[torch.Tensor] = None,
257
+ output_attentions: bool = False,
258
+ relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
259
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
260
+ mixed_query_layer = self.query(hidden_states)
261
+
262
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
263
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
264
+ query_layer = self.transpose_for_scores(mixed_query_layer)
265
+
266
+ # Take the dot product between "query" and "key" to get the raw attention scores.
267
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
268
+
269
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
270
+
271
+ # Add relative position bias if present.
272
+ if self.relative_position_bias is not None:
273
+ attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0)
274
+
275
+ # Add shared relative position bias if provided.
276
+ if relative_position_bias is not None:
277
+ attention_scores = attention_scores + relative_position_bias
278
+
279
+ # Normalize the attention scores to probabilities.
280
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
281
+
282
+ # This is actually dropping out entire tokens to attend to, which might
283
+ # seem a bit unusual, but is taken from the original Transformer paper.
284
+ attention_probs = self.dropout(attention_probs)
285
+
286
+ # Mask heads if we want to
287
+ if head_mask is not None:
288
+ attention_probs = attention_probs * head_mask
289
+
290
+ context_layer = torch.matmul(attention_probs, value_layer)
291
+
292
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
293
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
294
+ context_layer = context_layer.view(*new_context_layer_shape)
295
+
296
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
297
+
298
+ return outputs
299
+
300
+
301
+ # Copied from transformers.models.beit.modeling_beit.BeitSelfOutput with Beit->Data2VecVision
302
+ class Data2VecVisionSelfOutput(nn.Module):
303
+ """
304
+ The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the
305
+ layernorm applied before each block.
306
+ """
307
+
308
+ def __init__(self, config: Data2VecVisionConfig) -> None:
309
+ super().__init__()
310
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
311
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
312
+
313
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
314
+ hidden_states = self.dense(hidden_states)
315
+ hidden_states = self.dropout(hidden_states)
316
+
317
+ return hidden_states
318
+
319
+
320
+ # Copied from transformers.models.beit.modeling_beit.BeitAttention with Beit->Data2VecVision
321
+ class Data2VecVisionAttention(nn.Module):
322
+ def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
323
+ super().__init__()
324
+ self.attention = Data2VecVisionSelfAttention(config, window_size=window_size)
325
+ self.output = Data2VecVisionSelfOutput(config)
326
+ self.pruned_heads = set()
327
+
328
+ def prune_heads(self, heads):
329
+ if len(heads) == 0:
330
+ return
331
+ heads, index = find_pruneable_heads_and_indices(
332
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
333
+ )
334
+
335
+ # Prune linear layers
336
+ self.attention.query = prune_linear_layer(self.attention.query, index)
337
+ self.attention.key = prune_linear_layer(self.attention.key, index)
338
+ self.attention.value = prune_linear_layer(self.attention.value, index)
339
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
340
+
341
+ # Update hyper params and store pruned heads
342
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
343
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
344
+ self.pruned_heads = self.pruned_heads.union(heads)
345
+
346
+ def forward(
347
+ self,
348
+ hidden_states: torch.Tensor,
349
+ head_mask: Optional[torch.Tensor] = None,
350
+ output_attentions: bool = False,
351
+ relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
352
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
353
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias)
354
+
355
+ attention_output = self.output(self_outputs[0], hidden_states)
356
+
357
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
358
+ return outputs
359
+
360
+
361
+ # Copied from transformers.models.beit.modeling_beit.BeitIntermediate with Beit->Data2VecVision
362
+ class Data2VecVisionIntermediate(nn.Module):
363
+ def __init__(self, config: Data2VecVisionConfig) -> None:
364
+ super().__init__()
365
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
366
+ if isinstance(config.hidden_act, str):
367
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
368
+ else:
369
+ self.intermediate_act_fn = config.hidden_act
370
+
371
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
372
+ hidden_states = self.dense(hidden_states)
373
+ hidden_states = self.intermediate_act_fn(hidden_states)
374
+
375
+ return hidden_states
376
+
377
+
378
+ # Copied from transformers.models.beit.modeling_beit.BeitOutput with Beit->Data2VecVision
379
+ class Data2VecVisionOutput(nn.Module):
380
+ def __init__(self, config: Data2VecVisionConfig) -> None:
381
+ super().__init__()
382
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
383
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
384
+
385
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
386
+ hidden_states = self.dense(hidden_states)
387
+ hidden_states = self.dropout(hidden_states)
388
+
389
+ return hidden_states
390
+
391
+
392
+ # Copied from transformers.models.beit.modeling_beit.BeitLayer with Beit->Data2VecVision,BEiT->Data2VecVision
393
+ class Data2VecVisionLayer(nn.Module):
394
+ """This corresponds to the Block class in the timm implementation."""
395
+
396
+ def __init__(
397
+ self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0
398
+ ) -> None:
399
+ super().__init__()
400
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
401
+ self.seq_len_dim = 1
402
+ self.attention = Data2VecVisionAttention(config, window_size=window_size)
403
+ self.intermediate = Data2VecVisionIntermediate(config)
404
+ self.output = Data2VecVisionOutput(config)
405
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
406
+ self.drop_path = Data2VecVisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
407
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
408
+
409
+ init_values = config.layer_scale_init_value
410
+ if init_values > 0:
411
+ self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
412
+ self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
413
+ else:
414
+ self.lambda_1, self.lambda_2 = None, None
415
+
416
+ def forward(
417
+ self,
418
+ hidden_states: torch.Tensor,
419
+ head_mask: Optional[torch.Tensor] = None,
420
+ output_attentions: bool = False,
421
+ relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
422
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
423
+ self_attention_outputs = self.attention(
424
+ self.layernorm_before(hidden_states), # in Data2VecVision, layernorm is applied before self-attention
425
+ head_mask,
426
+ output_attentions=output_attentions,
427
+ relative_position_bias=relative_position_bias,
428
+ )
429
+ attention_output = self_attention_outputs[0]
430
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
431
+
432
+ # apply lambda_1 if present
433
+ if self.lambda_1 is not None:
434
+ attention_output = self.lambda_1 * attention_output
435
+
436
+ # first residual connection
437
+ hidden_states = self.drop_path(attention_output) + hidden_states
438
+
439
+ # in Data2VecVision, layernorm is also applied after self-attention
440
+ layer_output = self.layernorm_after(hidden_states)
441
+
442
+ layer_output = self.intermediate(layer_output)
443
+ layer_output = self.output(layer_output)
444
+
445
+ if self.lambda_2 is not None:
446
+ layer_output = self.lambda_2 * layer_output
447
+
448
+ # second residual connection
449
+ layer_output = self.drop_path(layer_output) + hidden_states
450
+
451
+ outputs = (layer_output,) + outputs
452
+
453
+ return outputs
454
+
455
+
456
+ # Copied from transformers.models.beit.modeling_beit.BeitRelativePositionBias with Beit->Data2VecVision
457
+ class Data2VecVisionRelativePositionBias(nn.Module):
458
+ def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
459
+ super().__init__()
460
+ self.window_size = window_size
461
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
462
+ self.relative_position_bias_table = nn.Parameter(
463
+ torch.zeros(self.num_relative_distance, config.num_attention_heads)
464
+ ) # 2*Wh-1 * 2*Ww-1, nH
465
+ # cls to token & token to cls & cls to cls
466
+
467
+ # get pair-wise relative position index for each token inside the window
468
+ coords_h = torch.arange(window_size[0])
469
+ coords_w = torch.arange(window_size[1])
470
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) # 2, Wh, Ww
471
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
472
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
473
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
474
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
475
+ relative_coords[:, :, 1] += window_size[1] - 1
476
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
477
+ relative_position_index = torch.zeros(
478
+ size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
479
+ )
480
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
481
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
482
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
483
+ relative_position_index[0, 0] = self.num_relative_distance - 1
484
+
485
+ self.register_buffer("relative_position_index", relative_position_index, persistent=False)
486
+
487
+ def forward(self) -> torch.Tensor:
488
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
489
+ self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
490
+ ) # Wh*Ww,Wh*Ww,nH
491
+
492
+ return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
493
+
494
+
495
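Editorial note (not part of the diffed file): for a window of `Wh x Ww` patches the bias table holds `(2*Wh - 1) * (2*Ww - 1)` entries for all pairwise patch offsets plus 3 extra rows for cls-to-token, token-to-cls and cls-to-cls. With the 14x14 patch grid assumed above that is 27 * 27 + 3 = 732:

```python
# Size check for the relative position bias table (window size assumed to be the 14x14 patch grid).
window_size = (14, 14)
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
print(num_relative_distance)  # 732: 729 pairwise offsets + cls-to-token, token-to-cls, cls-to-cls
```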
+ # Copied from transformers.models.beit.modeling_beit.BeitEncoder with Beit->Data2VecVision
496
+ class Data2VecVisionEncoder(nn.Module):
497
+ def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
498
+ super().__init__()
499
+ self.config = config
500
+ if config.use_shared_relative_position_bias:
501
+ self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
502
+ else:
503
+ self.relative_position_bias = None
504
+
505
+ # stochastic depth decay rule
506
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
507
+ self.layer = nn.ModuleList(
508
+ [
509
+ Data2VecVisionLayer(
510
+ config,
511
+ window_size=window_size if config.use_relative_position_bias else None,
512
+ drop_path_rate=dpr[i],
513
+ )
514
+ for i in range(config.num_hidden_layers)
515
+ ]
516
+ )
517
+ self.gradient_checkpointing = False
518
+
519
+ def forward(
520
+ self,
521
+ hidden_states: torch.Tensor,
522
+ head_mask: Optional[torch.Tensor] = None,
523
+ output_attentions: bool = False,
524
+ output_hidden_states: bool = False,
525
+ return_dict: bool = True,
526
+ ) -> Union[tuple, BaseModelOutput]:
527
+ all_hidden_states = () if output_hidden_states else None
528
+ all_self_attentions = () if output_attentions else None
529
+
530
+ for i, layer_module in enumerate(self.layer):
531
+ if output_hidden_states:
532
+ all_hidden_states = all_hidden_states + (hidden_states,)
533
+
534
+ layer_head_mask = head_mask[i] if head_mask is not None else None
535
+
536
+ if self.gradient_checkpointing and self.training:
537
+ layer_outputs = self._gradient_checkpointing_func(
538
+ layer_module.__call__,
539
+ hidden_states,
540
+ layer_head_mask,
541
+ output_attentions,
542
+ )
543
+ else:
544
+ relative_position_bias = (
545
+ self.relative_position_bias() if self.relative_position_bias is not None else None
546
+ )
547
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)
548
+
549
+ hidden_states = layer_outputs[0]
550
+
551
+ if output_attentions:
552
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
553
+
554
+ if output_hidden_states:
555
+ all_hidden_states = all_hidden_states + (hidden_states,)
556
+
557
+ if not return_dict:
558
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
559
+ return BaseModelOutput(
560
+ last_hidden_state=hidden_states,
561
+ hidden_states=all_hidden_states,
562
+ attentions=all_self_attentions,
563
+ )
564
+
565
+
566
+ # Copied from transformers.models.beit.modeling_beit.BeitPreTrainedModel with Beit->Data2VecVision,beit->data2vec_vision
567
+ class Data2VecVisionPreTrainedModel(PreTrainedModel):
568
+ """
569
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
570
+ models.
571
+ """
572
+
573
+ config_class = Data2VecVisionConfig
574
+ base_model_prefix = "data2vec_vision"
575
+ main_input_name = "pixel_values"
576
+ supports_gradient_checkpointing = True
577
+
578
+ def _init_weights(self, module):
579
+ """Initialize the weights"""
580
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
581
+ # Slightly different from the TF version which uses truncated_normal for initialization
582
+ # cf https://github.com/pytorch/pytorch/pull/5617
583
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
584
+ if module.bias is not None:
585
+ module.bias.data.zero_()
586
+ elif isinstance(module, nn.Embedding):
587
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
588
+ if module.padding_idx is not None:
589
+ module.weight.data[module.padding_idx].zero_()
590
+ elif isinstance(module, nn.LayerNorm):
591
+ module.bias.data.zero_()
592
+ module.weight.data.fill_(1.0)
593
+
594
+
595
+ DATA2VEC_VISION_START_DOCSTRING = r"""
596
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
597
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
598
+ behavior.
599
+
600
+ Parameters:
601
+ config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.
602
+ Initializing with a config file does not load the weights associated with the model, only the
603
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
604
+ """
605
+
606
+ DATA2VEC_VISION_INPUTS_DOCSTRING = r"""
607
+ Args:
608
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
609
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
610
+ [`BeitImageProcessor.__call__`] for details.
611
+
612
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
613
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
614
+
615
+ - 1 indicates the head is **not masked**,
616
+ - 0 indicates the head is **masked**.
617
+
618
+ output_attentions (`bool`, *optional*):
619
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
620
+ tensors for more detail.
621
+ output_hidden_states (`bool`, *optional*):
622
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
623
+ more detail.
624
+ return_dict (`bool`, *optional*):
625
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
626
+ """
627
+
628
+
629
+ @add_start_docstrings(
630
+ "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.",
631
+ DATA2VEC_VISION_START_DOCSTRING,
632
+ )
633
+ # Copied from transformers.models.beit.modeling_beit.BeitModel with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,True->False
634
+ class Data2VecVisionModel(Data2VecVisionPreTrainedModel):
635
+ def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False) -> None:
636
+ super().__init__(config)
637
+ self.config = config
638
+
639
+ self.embeddings = Data2VecVisionEmbeddings(config)
640
+ self.encoder = Data2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
641
+
642
+ self.layernorm = (
643
+ nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
644
+ )
645
+ self.pooler = Data2VecVisionPooler(config) if add_pooling_layer else None
646
+
647
+ # Initialize weights and apply final processing
648
+ self.post_init()
649
+
650
+ def get_input_embeddings(self):
651
+ return self.embeddings.patch_embeddings
652
+
653
+ def _prune_heads(self, heads_to_prune):
654
+ """
655
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
656
+ class PreTrainedModel
657
+ """
658
+ for layer, heads in heads_to_prune.items():
659
+ self.encoder.layer[layer].attention.prune_heads(heads)
660
+
661
+ @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
662
+ @add_code_sample_docstrings(
663
+ checkpoint=_CHECKPOINT_FOR_DOC,
664
+ output_type=Data2VecVisionModelOutputWithPooling,
665
+ config_class=_CONFIG_FOR_DOC,
666
+ modality="vision",
667
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
668
+ )
669
+ def forward(
670
+ self,
671
+ pixel_values: Optional[torch.Tensor] = None,
672
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
673
+ head_mask: Optional[torch.Tensor] = None,
674
+ output_attentions: Optional[bool] = None,
675
+ output_hidden_states: Optional[bool] = None,
676
+ return_dict: Optional[bool] = None,
677
+ ) -> Union[tuple, Data2VecVisionModelOutputWithPooling]:
678
+ r"""
679
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
680
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
681
+ """
682
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
683
+ output_hidden_states = (
684
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
685
+ )
686
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
687
+
688
+ if pixel_values is None:
689
+ raise ValueError("You have to specify pixel_values")
690
+
691
+ # Prepare head mask if needed
692
+ # 1.0 in head_mask indicate we keep the head
693
+ # attention_probs has shape bsz x n_heads x N x N
694
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
695
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
696
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
697
+
698
+ embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values, bool_masked_pos)
699
+
700
+ encoder_outputs = self.encoder(
701
+ embedding_output,
702
+ head_mask=head_mask,
703
+ output_attentions=output_attentions,
704
+ output_hidden_states=output_hidden_states,
705
+ return_dict=return_dict,
706
+ )
707
+ sequence_output = encoder_outputs[0]
708
+ sequence_output = self.layernorm(sequence_output)
709
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
710
+
711
+ if not return_dict:
712
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
713
+ return head_outputs + encoder_outputs[1:]
714
+
715
+ return Data2VecVisionModelOutputWithPooling(
716
+ last_hidden_state=sequence_output,
717
+ pooler_output=pooled_output,
718
+ hidden_states=encoder_outputs.hidden_states,
719
+ attentions=encoder_outputs.attentions,
720
+ )
721
+
722
+
723
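Editorial note (not part of the diffed file): a minimal usage sketch for the bare vision model. It assumes network access and that the checkpoint named in `_CHECKPOINT_FOR_DOC` is available; the resulting `last_hidden_state` shape matches `_EXPECTED_OUTPUT_SHAPE`:

```python
# Minimal usage sketch (assumes the facebook/data2vec-vision-base checkpoint can be downloaded).
import torch
from PIL import Image
from transformers import AutoImageProcessor, Data2VecVisionModel

processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
model = Data2VecVisionModel.from_pretrained("facebook/data2vec-vision-base")

image = Image.new("RGB", (224, 224))            # any PIL image works; the processor resizes it
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)          # torch.Size([1, 197, 768]), see _EXPECTED_OUTPUT_SHAPE
```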
+ # Copied from transformers.models.beit.modeling_beit.BeitPooler with Beit->Data2VecVision
724
+ class Data2VecVisionPooler(nn.Module):
725
+ def __init__(self, config: Data2VecVisionConfig) -> None:
726
+ super().__init__()
727
+ self.layernorm = (
728
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
729
+ )
730
+
731
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
732
+ if self.layernorm is not None:
733
+ # Mean pool the final hidden states of the patch tokens
734
+ patch_tokens = hidden_states[:, 1:, :]
735
+ pooled_output = self.layernorm(patch_tokens.mean(1))
736
+ else:
737
+ # Pool by simply taking the final hidden state of the [CLS] token
738
+ pooled_output = hidden_states[:, 0]
739
+
740
+ return pooled_output
741
+
742
+
743
+ @add_start_docstrings(
744
+ """
745
+ Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of
746
+ the final hidden states of the patch tokens) e.g. for ImageNet.
747
+ """,
748
+ DATA2VEC_VISION_START_DOCSTRING,
749
+ )
750
+ # Copied from transformers.models.beit.modeling_beit.BeitForImageClassification with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,beit->data2vec_vision
751
+ class Data2VecVisionForImageClassification(Data2VecVisionPreTrainedModel):
752
+ def __init__(self, config: Data2VecVisionConfig) -> None:
753
+ super().__init__(config)
754
+
755
+ self.num_labels = config.num_labels
756
+ self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=True)
757
+
758
+ # Classifier head
759
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
760
+
761
+ # Initialize weights and apply final processing
762
+ self.post_init()
763
+
764
+ @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
765
+ @add_code_sample_docstrings(
766
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
767
+ output_type=ImageClassifierOutput,
768
+ config_class=_CONFIG_FOR_DOC,
769
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
770
+ )
771
+ def forward(
772
+ self,
773
+ pixel_values: Optional[torch.Tensor] = None,
774
+ head_mask: Optional[torch.Tensor] = None,
775
+ labels: Optional[torch.Tensor] = None,
776
+ output_attentions: Optional[bool] = None,
777
+ output_hidden_states: Optional[bool] = None,
778
+ return_dict: Optional[bool] = None,
779
+ ) -> Union[tuple, ImageClassifierOutput]:
780
+ r"""
781
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
782
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
783
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
784
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
785
+ """
786
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
787
+ outputs = self.data2vec_vision(
788
+ pixel_values,
789
+ head_mask=head_mask,
790
+ output_attentions=output_attentions,
791
+ output_hidden_states=output_hidden_states,
792
+ return_dict=return_dict,
793
+ )
794
+
795
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
796
+
797
+ logits = self.classifier(pooled_output)
798
+
799
+ loss = None
800
+ if labels is not None:
801
+ if self.config.problem_type is None:
802
+ if self.num_labels == 1:
803
+ self.config.problem_type = "regression"
804
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
805
+ self.config.problem_type = "single_label_classification"
806
+ else:
807
+ self.config.problem_type = "multi_label_classification"
808
+
809
+ if self.config.problem_type == "regression":
810
+ loss_fct = MSELoss()
811
+ if self.num_labels == 1:
812
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
813
+ else:
814
+ loss = loss_fct(logits, labels)
815
+ elif self.config.problem_type == "single_label_classification":
816
+ loss_fct = CrossEntropyLoss()
817
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
818
+ elif self.config.problem_type == "multi_label_classification":
819
+ loss_fct = BCEWithLogitsLoss()
820
+ loss = loss_fct(logits, labels)
821
+ if not return_dict:
822
+ output = (logits,) + outputs[2:]
823
+ return ((loss,) + output) if loss is not None else output
824
+
825
+ return ImageClassifierOutput(
826
+ loss=loss,
827
+ logits=logits,
828
+ hidden_states=outputs.hidden_states,
829
+ attentions=outputs.attentions,
830
+ )
831
+
832
+
833
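Editorial note (not part of the diffed file): a usage sketch for the classification head with the ImageNet-1k fine-tuned checkpoint named in `_IMAGE_CLASS_CHECKPOINT`; it assumes the weights can be downloaded, and the predicted class id is looked up in `config.id2label`:

```python
# Usage sketch for the classification head (assumes the facebook/data2vec-vision-base-ft1k weights are available).
import torch
from PIL import Image
from transformers import AutoImageProcessor, Data2VecVisionForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")

image = Image.new("RGB", (224, 224))            # replace with a real photo to get a meaningful label
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits             # (1, 1000) for the ImageNet-1k head
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```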
+ # Copied from transformers.models.beit.modeling_beit.BeitConvModule with Beit->Data2VecVision
834
+ class Data2VecVisionConvModule(nn.Module):
835
+ """
836
+ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
837
+ layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
838
+
839
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
840
+ """
841
+
842
+ def __init__(
843
+ self,
844
+ in_channels: int,
845
+ out_channels: int,
846
+ kernel_size: Union[int, Tuple[int, int]],
847
+ padding: Union[int, Tuple[int, int], str] = 0,
848
+ bias: bool = False,
849
+ dilation: Union[int, Tuple[int, int]] = 1,
850
+ ) -> None:
851
+ super().__init__()
852
+ self.conv = nn.Conv2d(
853
+ in_channels=in_channels,
854
+ out_channels=out_channels,
855
+ kernel_size=kernel_size,
856
+ padding=padding,
857
+ bias=bias,
858
+ dilation=dilation,
859
+ )
860
+ self.bn = nn.BatchNorm2d(out_channels)
861
+ self.activation = nn.ReLU()
862
+
863
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
864
+ output = self.conv(input)
865
+ output = self.bn(output)
866
+ output = self.activation(output)
867
+
868
+ return output
869
+
870
+
871
+ # Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingBlock with Beit->Data2VecVision
872
+ class Data2VecVisionPyramidPoolingBlock(nn.Module):
873
+ def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
874
+ super().__init__()
875
+ self.layers = [
876
+ nn.AdaptiveAvgPool2d(pool_scale),
877
+ Data2VecVisionConvModule(in_channels, channels, kernel_size=1),
878
+ ]
879
+ for i, layer in enumerate(self.layers):
880
+ self.add_module(str(i), layer)
881
+
882
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
883
+ hidden_state = input
884
+ for layer in self.layers:
885
+ hidden_state = layer(hidden_state)
886
+ return hidden_state
887
+
888
+
889
+ # Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingModule with Beit->Data2VecVision
890
+ class Data2VecVisionPyramidPoolingModule(nn.Module):
891
+ """
892
+ Pyramid Pooling Module (PPM) used in PSPNet.
893
+
894
+ Args:
895
+ pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
896
+ Module.
897
+ in_channels (int): Input channels.
898
+ channels (int): Channels after modules, before conv_seg.
899
+ align_corners (bool): align_corners argument of F.interpolate.
900
+
901
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
902
+ """
903
+
904
+ def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
905
+ super().__init__()
906
+ self.pool_scales = pool_scales
907
+ self.align_corners = align_corners
908
+ self.in_channels = in_channels
909
+ self.channels = channels
910
+ self.blocks = []
911
+ for i, pool_scale in enumerate(pool_scales):
912
+ block = Data2VecVisionPyramidPoolingBlock(
913
+ pool_scale=pool_scale, in_channels=in_channels, channels=channels
914
+ )
915
+ self.blocks.append(block)
916
+ self.add_module(str(i), block)
917
+
918
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
919
+ ppm_outs = []
920
+ for ppm in self.blocks:
921
+ ppm_out = ppm(x)
922
+ upsampled_ppm_out = nn.functional.interpolate(
923
+ ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
924
+ )
925
+ ppm_outs.append(upsampled_ppm_out)
926
+ return ppm_outs
927
+
928
+
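For intuition, each pyramid-pooling branch above squeezes the feature map down to `pool_scale` x `pool_scale`, mixes channels with a 1x1 conv, and upsamples back so all branches can be concatenated with the input. A minimal shape-check sketch with plain `torch.nn` calls and toy sizes (not the classes above):

```python
import torch
from torch import nn

x = torch.randn(2, 768, 14, 14)          # (batch, channels, H, W) backbone feature map
for scale in (1, 2, 3, 6):               # typical pool_scales
    pooled = nn.AdaptiveAvgPool2d(scale)(x)                  # -> (2, 768, scale, scale)
    projected = nn.Conv2d(768, 768, kernel_size=1)(pooled)   # 1x1 channel mixing
    upsampled = nn.functional.interpolate(
        projected, size=x.shape[2:], mode="bilinear", align_corners=False
    )
    print(scale, tuple(upsampled.shape))  # every branch comes back as (2, 768, 14, 14)
```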
929
+ # Copied from transformers.models.beit.modeling_beit.BeitUperHead with Beit->Data2VecVision
930
+ class Data2VecVisionUperHead(nn.Module):
931
+ """
932
+ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
933
+ [UPerNet](https://arxiv.org/abs/1807.10221).
934
+
935
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
936
+ """
937
+
938
+ def __init__(self, config: Data2VecVisionConfig) -> None:
939
+ super().__init__()
940
+
941
+ self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
942
+ self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
943
+ self.channels = config.hidden_size
944
+ self.align_corners = False
945
+ self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
946
+
947
+ # PSP Module
948
+ self.psp_modules = Data2VecVisionPyramidPoolingModule(
949
+ self.pool_scales,
950
+ self.in_channels[-1],
951
+ self.channels,
952
+ align_corners=self.align_corners,
953
+ )
954
+ self.bottleneck = Data2VecVisionConvModule(
955
+ self.in_channels[-1] + len(self.pool_scales) * self.channels,
956
+ self.channels,
957
+ kernel_size=3,
958
+ padding=1,
959
+ )
960
+ # FPN Module
961
+ self.lateral_convs = nn.ModuleList()
962
+ self.fpn_convs = nn.ModuleList()
963
+ for in_channels in self.in_channels[:-1]: # skip the top layer
964
+ l_conv = Data2VecVisionConvModule(in_channels, self.channels, kernel_size=1)
965
+ fpn_conv = Data2VecVisionConvModule(self.channels, self.channels, kernel_size=3, padding=1)
966
+ self.lateral_convs.append(l_conv)
967
+ self.fpn_convs.append(fpn_conv)
968
+
969
+ self.fpn_bottleneck = Data2VecVisionConvModule(
970
+ len(self.in_channels) * self.channels,
971
+ self.channels,
972
+ kernel_size=3,
973
+ padding=1,
974
+ )
975
+
976
+ def psp_forward(self, inputs):
977
+ x = inputs[-1]
978
+ psp_outs = [x]
979
+ psp_outs.extend(self.psp_modules(x))
980
+ psp_outs = torch.cat(psp_outs, dim=1)
981
+ output = self.bottleneck(psp_outs)
982
+
983
+ return output
984
+
985
+ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
986
+ # build laterals
987
+ laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
988
+
989
+ laterals.append(self.psp_forward(encoder_hidden_states))
990
+
991
+ # build top-down path
992
+ used_backbone_levels = len(laterals)
993
+ for i in range(used_backbone_levels - 1, 0, -1):
994
+ prev_shape = laterals[i - 1].shape[2:]
995
+ laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
996
+ laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
997
+ )
998
+
999
+ # build outputs
1000
+ fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
1001
+ # append psp feature
1002
+ fpn_outs.append(laterals[-1])
1003
+
1004
+ for i in range(used_backbone_levels - 1, 0, -1):
1005
+ fpn_outs[i] = nn.functional.interpolate(
1006
+ fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
1007
+ )
1008
+ fpn_outs = torch.cat(fpn_outs, dim=1)
1009
+ output = self.fpn_bottleneck(fpn_outs)
1010
+ output = self.classifier(output)
1011
+
1012
+ return output
1013
+
1014
+
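The top-down loop in `forward` above upsamples each coarser lateral and folds it into the next finer one before the per-level 3x3 convs. A small sketch of just that accumulation pattern, using hypothetical multi-scale tensors rather than the module itself:

```python
import torch
from torch import nn

# hypothetical multi-scale laterals, finest first (as produced by the fpn1..fpn4 ops)
laterals = [torch.randn(1, 768, s, s) for s in (56, 28, 14, 7)]

# top-down path: upsample each coarser level and add it into the next finer one
for i in range(len(laterals) - 1, 0, -1):
    laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
        laterals[i], size=laterals[i - 1].shape[2:], mode="bilinear", align_corners=False
    )

print([tuple(t.shape) for t in laterals])  # resolutions unchanged, coarse context folded in
```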
1015
+ # Copied from transformers.models.beit.modeling_beit.BeitFCNHead with Beit->Data2VecVision
1016
+ class Data2VecVisionFCNHead(nn.Module):
1017
+ """
1018
+ Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of
1019
+ [FCNNet](https://arxiv.org/abs/1411.4038).
1020
+
1021
+ Args:
1022
+ config (Data2VecVisionConfig): Configuration.
1023
+ in_channels (int): Number of input channels, taken from `config.hidden_size`.
1024
+ kernel_size (int): The kernel size for convs in the head. Default: 3.
1025
+ dilation (int): The dilation rate for convs in the head. Default: 1.
1026
+
1027
+
1028
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1029
+ """
1030
+
1031
+ def __init__(
1032
+ self,
1033
+ config: Data2VecVisionConfig,
1034
+ in_index: int = 2,
1035
+ kernel_size: int = 3,
1036
+ dilation: Union[int, Tuple[int, int]] = 1,
1037
+ ) -> None:
1038
+ super().__init__()
1039
+ self.in_channels = config.hidden_size
1040
+ self.channels = config.auxiliary_channels
1041
+ self.num_convs = config.auxiliary_num_convs
1042
+ self.concat_input = config.auxiliary_concat_input
1043
+ self.in_index = in_index
1044
+
1045
+ conv_padding = (kernel_size // 2) * dilation
1046
+ convs = []
1047
+ convs.append(
1048
+ Data2VecVisionConvModule(
1049
+ self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
1050
+ )
1051
+ )
1052
+ for i in range(self.num_convs - 1):
1053
+ convs.append(
1054
+ Data2VecVisionConvModule(
1055
+ self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
1056
+ )
1057
+ )
1058
+ if self.num_convs == 0:
1059
+ self.convs = nn.Identity()
1060
+ else:
1061
+ self.convs = nn.Sequential(*convs)
1062
+ if self.concat_input:
1063
+ self.conv_cat = Data2VecVisionConvModule(
1064
+ self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
1065
+ )
1066
+
1067
+ self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
1068
+
1069
+ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
1070
+ # just take the relevant feature maps
1071
+ hidden_states = encoder_hidden_states[self.in_index]
1072
+ output = self.convs(hidden_states)
1073
+ if self.concat_input:
1074
+ output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
1075
+ output = self.classifier(output)
1076
+ return output
1077
+
1078
+
1079
+ @add_start_docstrings(
1080
+ """
1081
+ Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
1082
+ """,
1083
+ DATA2VEC_VISION_START_DOCSTRING,
1084
+ )
1085
+ # Copied from transformers.models.beit.modeling_beit.BeitForSemanticSegmentation with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,microsoft/beit-base-finetuned-ade-640-640->facebook/data2vec-vision-base,beit->data2vec_vision
1086
+ class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):
1087
+ def __init__(self, config: Data2VecVisionConfig) -> None:
1088
+ super().__init__(config)
1089
+
1090
+ self.num_labels = config.num_labels
1091
+ self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=False)
1092
+
1093
+ # FPNs
1094
+ if len(self.config.out_indices) != 4:
1095
+ raise ValueError(
1096
+ "Data2VecVisionForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
1097
+ "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
1098
+ "a base-sized architecture."
1099
+ )
1100
+ self.fpn1 = nn.Sequential(
1101
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
1102
+ nn.BatchNorm2d(config.hidden_size),
1103
+ nn.GELU(),
1104
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
1105
+ )
1106
+ self.fpn2 = nn.Sequential(
1107
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
1108
+ )
1109
+ self.fpn3 = nn.Identity()
1110
+ self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
1111
+
1112
+ # Semantic segmentation head(s)
1113
+ self.decode_head = Data2VecVisionUperHead(config)
1114
+ self.auxiliary_head = Data2VecVisionFCNHead(config) if config.use_auxiliary_head else None
1115
+
1116
+ # Initialize weights and apply final processing
1117
+ self.post_init()
1118
+
1119
+ def compute_loss(self, logits, auxiliary_logits, labels):
1120
+ # upsample logits to the images' original size
1121
+ upsampled_logits = nn.functional.interpolate(
1122
+ logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
1123
+ )
1124
+ if auxiliary_logits is not None:
1125
+ upsampled_auxiliary_logits = nn.functional.interpolate(
1126
+ auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
1127
+ )
1128
+ # compute weighted loss
1129
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
1130
+ main_loss = loss_fct(upsampled_logits, labels)
1131
+ loss = main_loss
1132
+ if auxiliary_logits is not None:
1133
+ auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
1134
+ loss += self.config.auxiliary_loss_weight * auxiliary_loss
1135
+
1136
+ return loss
1137
+
1138
+ @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
1139
+ @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
1140
+ def forward(
1141
+ self,
1142
+ pixel_values: Optional[torch.Tensor] = None,
1143
+ head_mask: Optional[torch.Tensor] = None,
1144
+ labels: Optional[torch.Tensor] = None,
1145
+ output_attentions: Optional[bool] = None,
1146
+ output_hidden_states: Optional[bool] = None,
1147
+ return_dict: Optional[bool] = None,
1148
+ ) -> Union[tuple, SemanticSegmenterOutput]:
1149
+ r"""
1150
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
1151
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
1152
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
1153
+
1154
+ Returns:
1155
+
1156
+ Examples:
1157
+
1158
+ ```python
1159
+ >>> from transformers import AutoImageProcessor, Data2VecVisionForSemanticSegmentation
1160
+ >>> from PIL import Image
1161
+ >>> import requests
1162
+
1163
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1164
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1165
+
1166
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
1167
+ >>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
1168
+
1169
+ >>> inputs = image_processor(images=image, return_tensors="pt")
1170
+ >>> outputs = model(**inputs)
1171
+ >>> # logits are of shape (batch_size, num_labels, height, width)
1172
+ >>> logits = outputs.logits
1173
+ ```"""
1174
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1175
+ output_hidden_states = (
1176
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1177
+ )
1178
+
1179
+ outputs = self.data2vec_vision(
1180
+ pixel_values,
1181
+ head_mask=head_mask,
1182
+ output_attentions=output_attentions,
1183
+ output_hidden_states=True, # we need the intermediate hidden states
1184
+ return_dict=return_dict,
1185
+ )
1186
+
1187
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
1188
+
1189
+ # only keep certain features, and reshape
1190
+ # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
1191
+ features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
1192
+ batch_size = pixel_values.shape[0]
1193
+ patch_resolution = self.config.image_size // self.config.patch_size
1194
+ features = [
1195
+ x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
1196
+ ]
1197
+
1198
+ # apply FPNs
1199
+ ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
1200
+ for i in range(len(features)):
1201
+ features[i] = ops[i](features[i])
1202
+
1203
+ logits = self.decode_head(features)
1204
+
1205
+ auxiliary_logits = None
1206
+ if self.auxiliary_head is not None:
1207
+ auxiliary_logits = self.auxiliary_head(features)
1208
+
1209
+ loss = None
1210
+ if labels is not None:
1211
+ if self.config.num_labels == 1:
1212
+ raise ValueError("The number of labels should be greater than one")
1213
+ else:
1214
+ loss = self.compute_loss(logits, auxiliary_logits, labels)
1215
+
1216
+ if not return_dict:
1217
+ if output_hidden_states:
1218
+ output = (logits,) + outputs[1:]
1219
+ else:
1220
+ output = (logits,) + outputs[2:]
1221
+ return ((loss,) + output) if loss is not None else output
1222
+
1223
+ return SemanticSegmenterOutput(
1224
+ loss=loss,
1225
+ logits=logits,
1226
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
1227
+ attentions=outputs.attentions,
1228
+ )
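Before the decode head runs, the segmentation forward above drops the CLS token and folds the remaining patch tokens back into 2D feature maps with `hidden_size` channels at `image_size // patch_size` resolution. A minimal sketch of that reshape, assuming a 224x224 input with 16x16 patches and a 768-dim hidden size:

```python
import torch

batch_size, hidden_size = 2, 768
patch_resolution = 224 // 16                 # 14 patches per side under the assumed sizes
tokens = torch.randn(batch_size, 1 + patch_resolution**2, hidden_size)  # [CLS] + 196 patch tokens

features = (
    tokens[:, 1:, :]                         # drop the CLS token
    .permute(0, 2, 1)                        # (batch, hidden_size, num_patches)
    .reshape(batch_size, -1, patch_resolution, patch_resolution)
)
print(features.shape)                        # torch.Size([2, 768, 14, 14])
```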
llmeval-env/lib/python3.10/site-packages/transformers/models/data2vec/modeling_tf_data2vec_vision.py ADDED
@@ -0,0 +1,1717 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Data2Vec Vision model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections.abc
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFSemanticSegmenterOutput,
33
+ TFSequenceClassifierOutput,
34
+ )
35
+ from ...modeling_tf_utils import (
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFSequenceClassificationLoss,
39
+ get_initializer,
40
+ keras,
41
+ keras_serializable,
42
+ unpack_inputs,
43
+ )
44
+ from ...tf_utils import shape_list, stable_softmax
45
+ from ...utils import (
46
+ add_code_sample_docstrings,
47
+ add_start_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_data2vec_vision import Data2VecVisionConfig
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ # General docstring
58
+ _CONFIG_FOR_DOC = "Data2VecVisionConfig"
59
+
60
+ # Base docstring
61
+ _CHECKPOINT_FOR_DOC = "facebook/data2vec-vision-base"
62
+ _EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
63
+
64
+ # Image classification docstring
65
+ _IMAGE_CLASS_CHECKPOINT = "facebook/data2vec-vision-base-ft1k"
66
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote"
67
+
68
+
69
+ @dataclass
70
+ class TFData2VecVisionModelOutputWithPooling(TFBaseModelOutputWithPooling):
71
+ """
72
+ Class for outputs of [`TFData2VecVisionModel`].
73
+
74
+ Args:
75
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
76
+ Sequence of hidden-states at the output of the last layer of the model.
77
+ pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
78
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
79
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
80
+ will be returned.
81
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
82
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
83
+ `(batch_size, sequence_length, hidden_size)`.
84
+
85
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
86
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
87
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
88
+ sequence_length)`.
89
+
90
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
91
+ heads.
92
+ """
93
+
94
+ last_hidden_state: tf.Tensor = None
95
+ pooler_output: tf.Tensor = None
96
+ hidden_states: Tuple[tf.Tensor] | None = None
97
+ attentions: Tuple[tf.Tensor] | None = None
98
+
99
+
100
+ class TFData2VecVisionDropPath(keras.layers.Layer):
101
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
102
+ References:
103
+ (1) github.com:rwightman/pytorch-image-models
104
+ """
105
+
106
+ def __init__(self, drop_path, **kwargs):
107
+ super().__init__(**kwargs)
108
+ self.drop_path = drop_path
109
+
110
+ def call(self, x, training=None):
111
+ if training:
112
+ keep_prob = 1 - self.drop_path
113
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
114
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
115
+ random_tensor = tf.floor(random_tensor)
116
+ return (x / keep_prob) * random_tensor
117
+ return x
118
+
119
+
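The drop-path layer above keeps each sample's residual branch with probability `1 - drop_path` during training and rescales survivors by `1 / keep_prob`, so the expected activation is unchanged. A standalone sketch of that math (static shapes assumed; the real layer uses `tf.shape` for dynamic batches):

```python
import tensorflow as tf

def drop_path(x: tf.Tensor, drop_prob: float, training: bool) -> tf.Tensor:
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1.0 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    shape = (x.shape[0],) + (1,) * (x.shape.rank - 1)
    mask = tf.floor(keep_prob + tf.random.uniform(shape, 0, 1))
    return (x / keep_prob) * mask

x = tf.ones((4, 3, 8))
# each sample is either dropped (0.0) or kept and rescaled to 2.0
print(tf.reduce_mean(drop_path(x, drop_prob=0.5, training=True), axis=[1, 2]))
```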
120
+ class TFData2VecVisionEmbeddings(keras.layers.Layer):
121
+ """
122
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
123
+
124
+ """
125
+
126
+ def __init__(self, config: Data2VecVisionConfig, **kwargs):
127
+ super().__init__(**kwargs)
128
+ self.config = config
129
+
130
+ self.patch_embeddings = TFData2VecVisionPatchEmbeddings(config, name="patch_embeddings")
131
+ self.num_patches = self.patch_embeddings.num_patches
132
+ self.config = config
133
+
134
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
135
+
136
+ def build(self, input_shape=None):
137
+ self.cls_token = self.add_weight(
138
+ shape=(1, 1, self.config.hidden_size),
139
+ initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
140
+ trainable=True,
141
+ name="cls_token",
142
+ )
143
+ if self.config.use_mask_token:
144
+ self.mask_token = self.add_weight(
145
+ shape=(1, 1, self.config.hidden_size),
146
+ initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
147
+ trainable=True,
148
+ name="mask_token",
149
+ )
150
+ else:
151
+ self.mask_token = None
152
+
153
+ if self.config.use_absolute_position_embeddings:
154
+ self.position_embeddings = self.add_weight(
155
+ shape=(1, self.num_patches + 1, self.config.hidden_size),
156
+ initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
157
+ trainable=True,
158
+ name="position_embeddings",
159
+ )
160
+ else:
161
+ self.position_embeddings = None
162
+
163
+ if self.built:
164
+ return
165
+ self.built = True
166
+ if getattr(self, "patch_embeddings", None) is not None:
167
+ with tf.name_scope(self.patch_embeddings.name):
168
+ self.patch_embeddings.build(None)
169
+
170
+ def call(self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None = None) -> tf.Tensor:
171
+ embeddings = self.patch_embeddings(pixel_values)
172
+ batch_size, seq_len, projection_dim = shape_list(embeddings)
173
+
174
+ cls_tokens = tf.tile(self.cls_token, (batch_size, 1, 1))
175
+
176
+ if bool_masked_pos is not None:
177
+ mask_tokens = tf.broadcast_to(self.mask_token, (batch_size, seq_len, projection_dim))
178
+ # replace the masked visual tokens by mask_tokens
179
+ w = bool_masked_pos[..., None]
180
+ w = tf.cast(w, mask_tokens.dtype)
181
+ # since TF doesn't support eager tensor assignment
182
+ embeddings = embeddings * (1 - w) + mask_tokens * w
183
+
184
+ embeddings = tf.concat([cls_tokens, embeddings], axis=1)
185
+ if self.position_embeddings is not None:
186
+ embeddings = embeddings + self.position_embeddings
187
+ embeddings = self.dropout(embeddings)
188
+
189
+ return embeddings
190
+
191
+
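The masked-position handling above avoids in-place tensor assignment by blending the patch embeddings with the mask token through a 0/1 weight tensor. A tiny numeric sketch of that blend with made-up values:

```python
import tensorflow as tf

embeddings = tf.ones((1, 4, 2))                     # (batch, seq_len, hidden) patch embeddings
mask_tokens = tf.zeros_like(embeddings)             # stand-in for the learned mask token
bool_masked_pos = tf.constant([[True, False, True, False]])

w = tf.cast(bool_masked_pos[..., None], embeddings.dtype)
mixed = embeddings * (1 - w) + mask_tokens * w      # masked positions take the mask token
print(mixed[0, :, 0].numpy())                       # [0. 1. 0. 1.]
```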
192
+ class TFData2VecVisionPatchEmbeddings(keras.layers.Layer):
193
+ """
194
+ Image to Patch Embedding.
195
+ """
196
+
197
+ def __init__(self, config: Data2VecVisionConfig, **kwargs):
198
+ super().__init__(**kwargs)
199
+ self.config = config
200
+
201
+ image_size, patch_size = config.image_size, config.patch_size
202
+ num_channels, hidden_size = config.num_channels, config.hidden_size
203
+
204
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
205
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
206
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
207
+ patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
208
+ self.image_size = image_size
209
+ self.patch_size = patch_size
210
+ self.num_patches = num_patches
211
+ self.patch_shape = patch_shape
212
+ self.num_channels = num_channels
213
+
214
+ self.projection = keras.layers.Conv2D(
215
+ filters=hidden_size,
216
+ kernel_size=patch_size,
217
+ strides=patch_size,
218
+ padding="valid",
219
+ data_format="channels_last",
220
+ kernel_initializer="glorot_uniform", # following torch.nn.Linear
221
+ bias_initializer="zeros",
222
+ name="projection",
223
+ )
224
+
225
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
226
+ batch_size, num_channels, height, width = shape_list(pixel_values)
227
+ if tf.executing_eagerly():
228
+ if num_channels != self.num_channels:
229
+ raise ValueError(
230
+ "Make sure that the channel dimension of the pixel values match with the one set in the"
231
+ " configuration."
232
+ )
233
+ if height != self.image_size[0] or width != self.image_size[1]:
234
+ raise ValueError(
235
+ f"Input image size ({height}*{width}) doesn't match model"
236
+ f" ({self.image_size[0]}*{self.image_size[1]})."
237
+ )
238
+
239
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
240
+ # So change the input format from `NCHW` to `NHWC`.
241
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
242
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
243
+
244
+ projection = self.projection(pixel_values)
245
+
246
+ # Change the 2D spatial dimensions to a single temporal dimension.
247
+ # shape = (batch_size, num_patches, out_channels=embed_dim)
248
+ num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
249
+
250
+ return tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1))
251
+
252
+ def build(self, input_shape=None):
253
+ if self.built:
254
+ return
255
+ self.built = True
256
+ if getattr(self, "projection", None) is not None:
257
+ with tf.name_scope(self.projection.name):
258
+ self.projection.build([None, None, None, self.num_channels])
259
+
260
+
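Because `keras.layers.Conv2D` does not accept `NCHW` input on CPU, the patch embedding above transposes pixel values to `NHWC`, applies a strided convolution, and flattens the spatial grid into a token sequence. A shape-only sketch under the usual 224x224 / 16x16 / 768-dim assumptions:

```python
import tensorflow as tf
from tensorflow import keras

pixel_values = tf.random.normal((2, 3, 224, 224))             # NCHW, as the model receives it
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))  # -> NHWC for Conv2D

projection = keras.layers.Conv2D(filters=768, kernel_size=16, strides=16, padding="valid")
patches = projection(pixel_values)                            # (2, 14, 14, 768)
tokens = tf.reshape(patches, (2, -1, 768))                    # (2, 196, 768) patch tokens
print(tokens.shape)
```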
261
+ class TFData2VecVisionSelfAttention(keras.layers.Layer):
262
+ def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
263
+ super().__init__(**kwargs)
264
+
265
+ if config.hidden_size % config.num_attention_heads != 0:
266
+ raise ValueError(
267
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
268
+ f"of attention heads ({config.num_attention_heads})"
269
+ )
270
+
271
+ self.num_attention_heads = config.num_attention_heads
272
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
273
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
274
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
275
+
276
+ self.query = keras.layers.Dense(
277
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
278
+ )
279
+ self.key = keras.layers.Dense(
280
+ units=self.all_head_size,
281
+ kernel_initializer=get_initializer(config.initializer_range),
282
+ name="key",
283
+ use_bias=False,
284
+ )
285
+ self.value = keras.layers.Dense(
286
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
287
+ )
288
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
289
+
290
+ if window_size:
291
+ self.relative_position_bias = TFData2VecVisionRelativePositionBias(
292
+ config, window_size=window_size, name="relative_position_bias"
293
+ )
294
+ else:
295
+ self.relative_position_bias = None
296
+ self.config = config
297
+
298
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
299
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
300
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
301
+
302
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
303
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
304
+
305
+ def call(
306
+ self,
307
+ hidden_states: tf.Tensor,
308
+ head_mask: tf.Tensor,
309
+ output_attentions: bool,
310
+ relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
311
+ training: bool = False,
312
+ ) -> Tuple[tf.Tensor]:
313
+ batch_size = shape_list(hidden_states)[0]
314
+ mixed_query_layer = self.query(inputs=hidden_states)
315
+ mixed_key_layer = self.key(inputs=hidden_states)
316
+ mixed_value_layer = self.value(inputs=hidden_states)
317
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
318
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
319
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
320
+
321
+ # Take the dot product between "query" and "key" to get the raw attention scores.
322
+ # (batch size, num_heads, seq_len_q, seq_len_k)
323
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
324
+ attention_scores = attention_scores / self.sqrt_att_head_size
325
+
326
+ # Add relative position bias if present.
327
+ if self.relative_position_bias is not None:
328
+ # Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras
329
+ # might complain about `Layer.call()` not being invoked properly. In this case this input
330
+ # i.e., 0.0 is not going to be used in any calculations so we're safe.
331
+ attention_scores = attention_scores + self.relative_position_bias(0.0)[None, ...]
332
+
333
+ # Add shared relative position bias if provided.
334
+ if relative_position_bias is not None:
335
+ attention_scores = attention_scores + relative_position_bias
336
+
337
+ # Normalize the attention scores to probabilities.
338
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
339
+
340
+ # This is actually dropping out entire tokens to attend to, which might
341
+ # seem a bit unusual, but is taken from the original Transformer paper.
342
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
343
+
344
+ # Mask heads if we want to
345
+ if head_mask is not None:
346
+ attention_probs = tf.multiply(attention_probs, head_mask)
347
+
348
+ attention_output = tf.matmul(attention_probs, value_layer)
349
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
350
+
351
+ # (batch_size, seq_len_q, all_head_size)
352
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
353
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
354
+
355
+ return outputs
356
+
357
+ def build(self, input_shape=None):
358
+ if self.built:
359
+ return
360
+ self.built = True
361
+ if getattr(self, "query", None) is not None:
362
+ with tf.name_scope(self.query.name):
363
+ self.query.build([None, None, self.config.hidden_size])
364
+ if getattr(self, "key", None) is not None:
365
+ with tf.name_scope(self.key.name):
366
+ self.key.build([None, None, self.config.hidden_size])
367
+ if getattr(self, "value", None) is not None:
368
+ with tf.name_scope(self.value.name):
369
+ self.value.build([None, None, self.config.hidden_size])
370
+ if getattr(self, "relative_position_bias", None) is not None:
371
+ with tf.name_scope(self.relative_position_bias.name):
372
+ self.relative_position_bias.build(None)
373
+
374
+
375
+ class TFData2VecVisionSelfOutput(keras.layers.Layer):
376
+ """
377
+ The residual connection is defined in TFData2VecVisionLayer instead of here (as is the case with other models), due
378
+ to the layernorm applied before each block.
379
+ """
380
+
381
+ def __init__(self, config: Data2VecVisionConfig, **kwargs):
382
+ super().__init__(**kwargs)
383
+
384
+ self.dense = keras.layers.Dense(
385
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
386
+ )
387
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
388
+ self.config = config
389
+
390
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, gamma=None, training: bool = False) -> tf.Tensor:
391
+ hidden_states = self.dense(inputs=hidden_states)
392
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
393
+
394
+ return hidden_states
395
+
396
+ def build(self, input_shape=None):
397
+ if self.built:
398
+ return
399
+ self.built = True
400
+ if getattr(self, "dense", None) is not None:
401
+ with tf.name_scope(self.dense.name):
402
+ self.dense.build([None, None, self.config.hidden_size])
403
+
404
+
405
+ class TFData2VecVisionAttention(keras.layers.Layer):
406
+ def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
407
+ super().__init__(**kwargs)
408
+
409
+ self.attention = TFData2VecVisionSelfAttention(config, window_size=window_size, name="attention")
410
+ self.dense_output = TFData2VecVisionSelfOutput(config, name="output")
411
+
412
+ def prune_heads(self, heads):
413
+ raise NotImplementedError
414
+
415
+ def call(
416
+ self,
417
+ input_tensor: tf.Tensor,
418
+ head_mask: tf.Tensor,
419
+ output_attentions: bool,
420
+ relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
421
+ training: bool = False,
422
+ ) -> Tuple[tf.Tensor]:
423
+ self_outputs = self.attention(
424
+ hidden_states=input_tensor,
425
+ head_mask=head_mask,
426
+ output_attentions=output_attentions,
427
+ relative_position_bias=relative_position_bias,
428
+ training=training,
429
+ )
430
+ attention_output = self.dense_output(
431
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
432
+ )
433
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
434
+
435
+ return outputs
436
+
437
+ def build(self, input_shape=None):
438
+ if self.built:
439
+ return
440
+ self.built = True
441
+ if getattr(self, "attention", None) is not None:
442
+ with tf.name_scope(self.attention.name):
443
+ self.attention.build(None)
444
+ if getattr(self, "dense_output", None) is not None:
445
+ with tf.name_scope(self.dense_output.name):
446
+ self.dense_output.build(None)
447
+
448
+
449
+ # Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->Data2VecVision
450
+ class TFData2VecVisionIntermediate(keras.layers.Layer):
451
+ def __init__(self, config: Data2VecVisionConfig, **kwargs):
452
+ super().__init__(**kwargs)
453
+
454
+ self.dense = keras.layers.Dense(
455
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
456
+ )
457
+
458
+ if isinstance(config.hidden_act, str):
459
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
460
+ else:
461
+ self.intermediate_act_fn = config.hidden_act
462
+ self.config = config
463
+
464
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
465
+ hidden_states = self.dense(inputs=hidden_states)
466
+ hidden_states = self.intermediate_act_fn(hidden_states)
467
+
468
+ return hidden_states
469
+
470
+ def build(self, input_shape=None):
471
+ if self.built:
472
+ return
473
+ self.built = True
474
+ if getattr(self, "dense", None) is not None:
475
+ with tf.name_scope(self.dense.name):
476
+ self.dense.build([None, None, self.config.hidden_size])
477
+
478
+
479
+ class TFData2VecVisionOutput(keras.layers.Layer):
480
+ def __init__(self, config: Data2VecVisionConfig, **kwargs):
481
+ super().__init__(**kwargs)
482
+
483
+ self.dense = keras.layers.Dense(
484
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
485
+ )
486
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
487
+ self.config = config
488
+
489
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
490
+ hidden_states = self.dense(inputs=hidden_states)
491
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
492
+
493
+ return hidden_states
494
+
495
+ def build(self, input_shape=None):
496
+ if self.built:
497
+ return
498
+ self.built = True
499
+ if getattr(self, "dense", None) is not None:
500
+ with tf.name_scope(self.dense.name):
501
+ self.dense.build([None, None, self.config.intermediate_size])
502
+
503
+
504
+ class TFData2VecVisionLayer(keras.layers.Layer):
505
+ """This corresponds to the Block class in the timm implementation."""
506
+
507
+ def __init__(
508
+ self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0, **kwargs
509
+ ):
510
+ super().__init__(**kwargs)
511
+ self.config = config
512
+
513
+ self.attention = TFData2VecVisionAttention(config, window_size=window_size, name="attention")
514
+ self.intermediate = TFData2VecVisionIntermediate(config, name="intermediate")
515
+ self.data2vec_output = TFData2VecVisionOutput(config, name="output")
516
+
517
+ self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
518
+ self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
519
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
520
+ # behaviour.
521
+ self.drop_path = (
522
+ TFData2VecVisionDropPath(drop_path_rate, name="drop_path")
523
+ if drop_path_rate > 0.0
524
+ else keras.layers.Activation("linear", name="drop_path")
525
+ )
526
+ self.init_values = config.layer_scale_init_value
527
+
528
+ def build(self, input_shape: tf.TensorShape = None):
529
+ if self.init_values > 0:
530
+ self.lambda_1 = self.add_weight(
531
+ shape=(self.config.hidden_size),
532
+ initializer="ones",
533
+ trainable=True,
534
+ name="lambda_1",
535
+ )
536
+ self.lambda_2 = self.add_weight(
537
+ shape=(self.config.hidden_size),
538
+ initializer="ones",
539
+ trainable=True,
540
+ name="lambda_2",
541
+ )
542
+ self.lambda_1.assign(self.init_values * tf.ones((self.config.hidden_size)))
543
+ self.lambda_2.assign(self.init_values * tf.ones((self.config.hidden_size)))
544
+ else:
545
+ self.lambda_1, self.lambda_2 = None, None
546
+
547
+ if self.built:
548
+ return
549
+ self.built = True
550
+ if getattr(self, "attention", None) is not None:
551
+ with tf.name_scope(self.attention.name):
552
+ self.attention.build(None)
553
+ if getattr(self, "intermediate", None) is not None:
554
+ with tf.name_scope(self.intermediate.name):
555
+ self.intermediate.build(None)
556
+ if getattr(self, "data2vec_output", None) is not None:
557
+ with tf.name_scope(self.data2vec_output.name):
558
+ self.data2vec_output.build(None)
559
+ if getattr(self, "layernorm_before", None) is not None:
560
+ with tf.name_scope(self.layernorm_before.name):
561
+ self.layernorm_before.build([None, None, self.config.hidden_size])
562
+ if getattr(self, "layernorm_after", None) is not None:
563
+ with tf.name_scope(self.layernorm_after.name):
564
+ self.layernorm_after.build([None, None, self.config.hidden_size])
565
+ if getattr(self, "drop_path", None) is not None:
566
+ with tf.name_scope(self.drop_path.name):
567
+ self.drop_path.build(None)
568
+
569
+ def call(
570
+ self,
571
+ hidden_states: tf.Tensor,
572
+ head_mask: tf.Tensor,
573
+ output_attentions: bool,
574
+ relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
575
+ training: bool = False,
576
+ ) -> Tuple[tf.Tensor]:
577
+ self_attention_outputs = self.attention(
578
+ # in Data2VecVision, layernorm is applied before self-attention
579
+ input_tensor=self.layernorm_before(inputs=hidden_states),
580
+ head_mask=head_mask,
581
+ output_attentions=output_attentions,
582
+ relative_position_bias=relative_position_bias,
583
+ training=training,
584
+ )
585
+ attention_output = self_attention_outputs[0]
586
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
587
+
588
+ # apply lambda_1 if present
589
+ if self.lambda_1 is not None:
590
+ attention_output = self.lambda_1 * attention_output
591
+
592
+ # first residual connection
593
+ hidden_states = self.drop_path(attention_output) + hidden_states
594
+
595
+ # in Data2VecVision, layernorm is also applied after self-attention
596
+ layer_output = self.layernorm_after(hidden_states)
597
+
598
+ layer_output = self.intermediate(layer_output)
599
+ layer_output = self.data2vec_output(layer_output)
600
+
601
+ if self.lambda_2 is not None:
602
+ layer_output = self.lambda_2 * layer_output
603
+
604
+ # second residual connection
605
+ layer_output = self.drop_path(layer_output) + hidden_states
606
+
607
+ outputs = (layer_output,) + outputs
608
+
609
+ return outputs
610
+
611
+
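The `lambda_1` / `lambda_2` weights above implement LayerScale: each residual branch output is multiplied by a learned per-channel vector initialised to `layer_scale_init_value`. A minimal sketch of that scaling with hypothetical sizes (not the layer's build logic):

```python
import tensorflow as tf

hidden_size, init_value = 768, 0.1                        # layer_scale_init_value > 0 enables LayerScale
lambda_1 = tf.Variable(init_value * tf.ones((hidden_size,)), name="lambda_1")

branch_output = tf.random.normal((2, 197, hidden_size))   # output of one residual branch
scaled = lambda_1 * branch_output                         # broadcast over batch and sequence dims
print(scaled.shape)                                       # (2, 197, 768)
```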
612
+ # Taken and modified from here:
613
+ # https://github.com/leondgarse/keras_cv_attention_models/blob/main/keras_cv_attention_models/beit/beit.py#L28
614
+ class TFData2VecVisionRelativePositionBias(keras.layers.Layer):
615
+ def __init__(self, config: Data2VecVisionConfig, window_size: tuple, **kwargs) -> None:
616
+ super().__init__(**kwargs)
617
+ self.config = config
618
+
619
+ self.window_size = window_size
620
+ # +3 for cls_token_pos_len
621
+ # window_size can be something like (14, 14)
622
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
623
+
624
+ self.relative_position_index = self.get_position_index()
625
+
626
+ def build(self, input_shape):
627
+ self.relative_position_bias_table = self.add_weight(
628
+ shape=(self.num_relative_distance, self.config.num_attention_heads),
629
+ initializer="zeros",
630
+ trainable=True,
631
+ name="relative_position_bias_table",
632
+ ) # [2*Wh-1 * 2*Ww-1, nH]
633
+ # cls to token & token to cls & cls to cls
634
+
635
+ super().build(input_shape)
636
+
637
+ def get_position_index(self):
638
+ # get pair-wise relative position index for each token inside the window
639
+ xx, yy = tf.meshgrid(range(self.window_size[0]), range(self.window_size[1]))
640
+ coords = tf.stack([yy, xx], axis=0) # [2, Wh, Ww]
641
+ coords_flatten = tf.reshape(coords, [2, -1]) # [2, Wh*Ww]
642
+
643
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Wh*Ww, Wh*Ww]
644
+ relative_coords = tf.transpose(relative_coords, perm=[1, 2, 0]) # [Wh*Ww, Wh*Ww, 2]
645
+
646
+ xx = (relative_coords[:, :, 0] + self.window_size[0] - 1) * (2 * self.window_size[1] - 1)
647
+ yy = relative_coords[:, :, 1] + self.window_size[1] - 1
648
+ relative_coords = tf.stack([xx, yy], axis=-1)
649
+
650
+ relative_position_index = tf.reduce_sum(relative_coords, axis=-1) # [Wh*Ww, Wh*Ww]
651
+
652
+ top = tf.ones((1, relative_position_index.shape[1]), dtype=relative_position_index.dtype) * (
653
+ self.num_relative_distance - 3
654
+ )
655
+ left = tf.ones((relative_position_index.shape[0], 1), dtype=relative_position_index.dtype) * (
656
+ self.num_relative_distance - 2
657
+ )
658
+ corner = tf.ones((1, 1), dtype=relative_position_index.dtype) * (self.num_relative_distance - 1)
659
+
660
+ left_corner = tf.concat([corner, left], axis=0)
661
+ relative_position_index = tf.concat([top, relative_position_index], axis=0)
662
+ relative_position_index = tf.concat([left_corner, relative_position_index], axis=1) # [Wh*Ww + 1, Wh*Ww + 1]
663
+ return relative_position_index
664
+
665
+ def call(self, inputs=None) -> tf.Tensor:
666
+ relative_position_bias = tf.gather(self.relative_position_bias_table, self.relative_position_index, axis=0)
667
+ return tf.transpose(relative_position_bias, [2, 0, 1])
668
+
669
+
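For a `window_size` of (14, 14), the bias table above therefore holds `(2*14 - 1) * (2*14 - 1) + 3 = 732` rows: one per possible relative offset between two patches inside the window, plus the three extra slots for cls-to-token, token-to-cls and cls-to-cls. A quick arithmetic check in plain Python:

```python
def num_relative_distance(window_size):
    wh, ww = window_size
    # every distinct (dy, dx) offset between two patches in the window, plus 3 cls slots
    return (2 * wh - 1) * (2 * ww - 1) + 3

print(num_relative_distance((14, 14)))  # 732
print(num_relative_distance((7, 7)))    # 172
```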
670
+ class TFData2VecVisionEncoder(keras.layers.Layer):
671
+ def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
672
+ super().__init__(**kwargs)
673
+ self.config = config
674
+ if config.use_shared_relative_position_bias:
675
+ self.relative_position_bias = TFData2VecVisionRelativePositionBias(
676
+ config, window_size=window_size, name="relative_position_bias"
677
+ )
678
+ else:
679
+ self.relative_position_bias = None
680
+
681
+ # stochastic depth decay rule
682
+ dpr = list(tf.linspace(0.0, config.drop_path_rate, config.num_hidden_layers))
683
+ self.layer = [
684
+ TFData2VecVisionLayer(
685
+ config,
686
+ window_size=window_size if config.use_relative_position_bias else None,
687
+ drop_path_rate=dpr[i],
688
+ name=f"layer_._{i}",
689
+ )
690
+ for i in range(config.num_hidden_layers)
691
+ ]
692
+
693
+ def call(
694
+ self,
695
+ hidden_states: tf.Tensor,
696
+ head_mask: tf.Tensor | None = None,
697
+ output_attentions: bool = False,
698
+ output_hidden_states: bool = False,
699
+ return_dict: bool = True,
700
+ ) -> Union[tuple, TFBaseModelOutput]:
701
+ all_hidden_states = () if output_hidden_states else None
702
+ all_self_attentions = () if output_attentions else None
703
+
704
+ for i, layer_module in enumerate(self.layer):
705
+ if output_hidden_states:
706
+ all_hidden_states = all_hidden_states + (hidden_states,)
707
+
708
+ layer_head_mask = head_mask[i] if head_mask is not None else None
709
+ # Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras
710
+ # might complain about `Layer.call()` not being invoked properly. In this case this input
711
+ # i.e., 0.0 is not going to be used in any calculations so we're safe.
712
+ relative_position_bias = (
713
+ self.relative_position_bias(0.0) if self.relative_position_bias is not None else None
714
+ )
715
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)
716
+
717
+ hidden_states = layer_outputs[0]
718
+
719
+ if output_attentions:
720
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
721
+
722
+ if output_hidden_states:
723
+ all_hidden_states = all_hidden_states + (hidden_states,)
724
+
725
+ if not return_dict:
726
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
727
+
728
+ return TFBaseModelOutput(
729
+ last_hidden_state=hidden_states,
730
+ hidden_states=all_hidden_states,
731
+ attentions=all_self_attentions,
732
+ )
733
+
734
+ def build(self, input_shape=None):
735
+ if self.built:
736
+ return
737
+ self.built = True
738
+ if getattr(self, "relative_position_bias", None) is not None:
739
+ with tf.name_scope(self.relative_position_bias.name):
740
+ self.relative_position_bias.build(None)
741
+ if getattr(self, "layer", None) is not None:
742
+ for layer in self.layer:
743
+ with tf.name_scope(layer.name):
744
+ layer.build(None)
745
+
746
+
747
+ @keras_serializable
748
+ class TFData2VecVisionMainLayer(keras.layers.Layer):
749
+ config_class = Data2VecVisionConfig
750
+
751
+ def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = True, **kwargs):
752
+ super().__init__(**kwargs)
753
+
754
+ self.config = config
755
+ self.add_pooling_layer = add_pooling_layer
756
+
757
+ self.embeddings = TFData2VecVisionEmbeddings(config, name="embeddings")
758
+ self.encoder = TFData2VecVisionEncoder(
759
+ config, window_size=self.embeddings.patch_embeddings.patch_shape, name="encoder"
760
+ )
761
+ self.layernorm = (
762
+ tf.identity
763
+ if config.use_mean_pooling
764
+ else keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
765
+ )
766
+
767
+ # We are setting the `data_format` like so because from here on we will revert to the
768
+ # NCHW output format
769
+ self.pooler = TFData2VecVisionPooler(config, name="pooler") if add_pooling_layer else None
770
+
771
+ def get_input_embeddings(self) -> keras.layers.Layer:
772
+ return self.embeddings.patch_embeddings
773
+
774
+ def _prune_heads(self, heads_to_prune):
775
+ """
776
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
777
+ class PreTrainedModel
778
+ """
779
+ raise NotImplementedError
780
+
781
+ @unpack_inputs
782
+ def call(
783
+ self,
784
+ pixel_values: tf.Tensor | None = None,
785
+ bool_masked_pos: tf.Tensor | None = None,
786
+ head_mask: tf.Tensor | None = None,
787
+ output_attentions: Optional[bool] = None,
788
+ output_hidden_states: Optional[bool] = None,
789
+ return_dict: Optional[bool] = None,
790
+ training: bool = False,
791
+ ) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
792
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
793
+ output_hidden_states = (
794
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
795
+ )
796
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
797
+
798
+ if pixel_values is None:
799
+ raise ValueError("You have to specify pixel_values")
800
+
801
+ # Prepare head mask if needed
802
+ # 1.0 in head_mask indicate we keep the head
803
+ # attention_probs has shape bsz x n_heads x N x N
804
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
805
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
806
+ if head_mask is not None:
807
+ raise NotImplementedError
808
+ else:
809
+ head_mask = [None] * self.config.num_hidden_layers
810
+
811
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos, training=training)
812
+
813
+ encoder_outputs = self.encoder(
814
+ embedding_output,
815
+ head_mask=head_mask,
816
+ output_attentions=output_attentions,
817
+ output_hidden_states=output_hidden_states,
818
+ return_dict=return_dict,
819
+ training=training,
820
+ )
821
+
822
+ sequence_output = encoder_outputs[0]
823
+ sequence_output = self.layernorm(sequence_output)
824
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
825
+
826
+ if not return_dict:
827
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
828
+ return head_outputs + encoder_outputs[1:]
829
+
830
+ return TFData2VecVisionModelOutputWithPooling(
831
+ last_hidden_state=sequence_output,
832
+ pooler_output=pooled_output,
833
+ hidden_states=encoder_outputs.hidden_states,
834
+ attentions=encoder_outputs.attentions,
835
+ )
836
+
837
+ def build(self, input_shape=None):
838
+ if self.built:
839
+ return
840
+ self.built = True
841
+ if getattr(self, "embeddings", None) is not None:
842
+ with tf.name_scope(self.embeddings.name):
843
+ self.embeddings.build(None)
844
+ if getattr(self, "encoder", None) is not None:
845
+ with tf.name_scope(self.encoder.name):
846
+ self.encoder.build(None)
847
+ if getattr(self, "layernorm", None) is not None:
848
+ if hasattr(self.layernorm, "name"):
849
+ with tf.name_scope(self.layernorm.name):
850
+ self.layernorm.build((None, self.config.hidden_size))
851
+ if getattr(self, "pooler", None) is not None:
852
+ with tf.name_scope(self.pooler.name):
853
+ self.pooler.build(None)
854
+
855
+
856
+ class TFData2VecVisionPooler(keras.layers.Layer):
857
+ def __init__(self, config: Data2VecVisionConfig, **kwargs):
858
+ super().__init__(**kwargs)
859
+ self.layernorm = (
860
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
861
+ if config.use_mean_pooling
862
+ else None
863
+ )
864
+ self.config = config
865
+
866
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
867
+ if self.layernorm is not None:
868
+ # Mean pool the final hidden states of the patch tokens
869
+ patch_tokens = hidden_states[:, 1:, :]
870
+ pooled_output = self.layernorm(tf.reduce_mean(patch_tokens, axis=1))
871
+ else:
872
+ # Pool by simply taking the final hidden state of the [CLS] token
873
+ pooled_output = hidden_states[:, 0]
874
+
875
+ return pooled_output
876
+
877
+ def build(self, input_shape=None):
878
+ if self.built:
879
+ return
880
+ self.built = True
881
+ if getattr(self, "layernorm", None) is not None:
882
+ if hasattr(self.layernorm, "name"):
883
+ with tf.name_scope(self.layernorm.name):
884
+ self.layernorm.build((None, self.config.hidden_size))
885
+
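The pooler above either mean-pools the patch tokens (when `config.use_mean_pooling` is set, followed by a layernorm) or takes the final CLS hidden state. A short sketch of both paths on a dummy hidden-state tensor (layernorm omitted for brevity):

```python
import tensorflow as tf

hidden_states = tf.random.normal((2, 197, 768))    # [CLS] token + 196 patch tokens

mean_pooled = tf.reduce_mean(hidden_states[:, 1:, :], axis=1)  # use_mean_pooling=True path
cls_pooled = hidden_states[:, 0]                               # use_mean_pooling=False path
print(mean_pooled.shape, cls_pooled.shape)                     # (2, 768) (2, 768)
```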
886
+
887
+ class TFData2VecVisionPreTrainedModel(TFPreTrainedModel):
888
+ """
889
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
890
+ models.
891
+ """
892
+
893
+ config_class = Data2VecVisionConfig
894
+ base_model_prefix = "data2vec_vision"
895
+ main_input_name = "pixel_values"
896
+ _keys_to_ignore_on_load_unexpected = [r"relative_position_index"]
897
+
898
+
899
+ DATA2VEC_VISION_START_DOCSTRING = r"""
900
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
901
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
902
+ etc.).
903
+
904
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
905
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
906
+ behavior.
907
+
908
+ <Tip>
909
+
910
+ TensorFlow models and layers in `transformers` accept two formats as input:
911
+
912
+ - having all inputs as keyword arguments (like PyTorch models), or
913
+ - having all inputs as a list, tuple or dict in the first positional argument.
914
+
915
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
916
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
917
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
918
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
919
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
920
+ positional argument:
921
+
922
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
923
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
924
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
925
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
926
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
927
+
928
+ Note that when creating models and layers with
929
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
930
+ about any of this, as you can just pass inputs like you would to any other Python function!
931
+
932
+ </Tip>
933
+
934
+ Args:
935
+ config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.
936
+ Initializing with a config file does not load the weights associated with the model, only the
937
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
938
+ """
939
+
940
+ DATA2VEC_VISION_INPUTS_DOCSTRING = r"""
941
+ Args:
942
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, each example of shape `(batch_size, num_channels, height, width)`):
943
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
944
+ [`BeitImageProcessor.__call__`] for details.
945
+
946
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
947
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
948
+ - 1 indicates the head is **not masked**,
949
+ - 0 indicates the head is **masked**.
950
+
951
+ output_attentions (`bool`, *optional*):
952
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
953
+ tensors for more detail.
954
+
955
+ output_hidden_states (`bool`, *optional*):
956
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
957
+ more detail.
958
+
959
+ return_dict (`bool`, *optional*):
960
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
961
+ in eager mode; in graph mode the value will always be set to `True`.
962
+
963
+ training (`bool`, *optional*, defaults to `False`):
964
+ Whether or not to use the model in training mode (some modules like dropout modules have different
965
+ behaviors between training and evaluation).
966
+ """
967
+
968
+
969
+ @add_start_docstrings(
970
+ "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.",
971
+ DATA2VEC_VISION_START_DOCSTRING,
972
+ )
973
+ class TFData2VecVisionModel(TFData2VecVisionPreTrainedModel):
974
+ def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False, *inputs, **kwargs):
975
+ super().__init__(config, *inputs, **kwargs)
976
+ self.config = config
977
+
978
+ self.data2vec_vision = TFData2VecVisionMainLayer(
979
+ config, add_pooling_layer=add_pooling_layer, name="data2vec_vision"
980
+ )
981
+
982
+ def get_input_embeddings(self):
983
+ return self.data2vec_vision.get_input_embeddings()
984
+
985
+ @unpack_inputs
986
+ @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
987
+ @add_code_sample_docstrings(
988
+ checkpoint=_CHECKPOINT_FOR_DOC,
989
+ output_type=TFData2VecVisionModelOutputWithPooling,
990
+ config_class=_CONFIG_FOR_DOC,
991
+ modality="vision",
992
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
993
+ )
994
+ def call(
995
+ self,
996
+ pixel_values: TFModelInputType | None = None,
997
+ bool_masked_pos: tf.Tensor | None = None,
998
+ head_mask: np.ndarray | tf.Tensor | None = None,
999
+ output_attentions: Optional[bool] = None,
1000
+ output_hidden_states: Optional[bool] = None,
1001
+ return_dict: Optional[bool] = None,
1002
+ training: bool = False,
1003
+ ) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
1004
+ r"""
1005
+ bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`, *optional*):
1006
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
1007
+ """
1008
+ outputs = self.data2vec_vision(
1009
+ pixel_values=pixel_values,
1010
+ bool_masked_pos=bool_masked_pos,
1011
+ head_mask=head_mask,
1012
+ output_attentions=output_attentions,
1013
+ output_hidden_states=output_hidden_states,
1014
+ return_dict=return_dict,
1015
+ training=training,
1016
+ )
1017
+
1018
+ return outputs
1019
+
1020
+ def build(self, input_shape=None):
1021
+ if self.built:
1022
+ return
1023
+ self.built = True
1024
+ if getattr(self, "data2vec_vision", None) is not None:
1025
+ with tf.name_scope(self.data2vec_vision.name):
1026
+ self.data2vec_vision.build(None)
1027
+
1028
+
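With the bare model defined, the input-format flexibility described in the Tip above can be exercised directly. A minimal sketch with randomly initialized weights; it assumes only the `TFData2VecVisionModel` class defined here plus the standard `Data2VecVisionConfig` attributes `num_channels` and `image_size` (not shown in this diff):

```python
import tensorflow as tf
from transformers import Data2VecVisionConfig, TFData2VecVisionModel

config = Data2VecVisionConfig()
model = TFData2VecVisionModel(config)  # random weights; use from_pretrained(...) for a real checkpoint

# Dummy channels-first pixel values, matching the shape the inputs docstring asks for
pixel_values = tf.random.uniform((2, config.num_channels, config.image_size, config.image_size))

out_kw = model(pixel_values=pixel_values)          # keyword argument
out_pos = model(pixel_values)                      # single positional tensor
out_dict = model({"pixel_values": pixel_values})   # dict keyed by input name

print(out_kw.last_hidden_state.shape)  # (2, num_patches + 1, hidden_size)
```

All three calls are normalized by the `unpack_inputs` machinery, which is why dict-style inputs from `model.fit()` "just work".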
1029
+ @add_start_docstrings(
1030
+ """
1031
+ Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of
1032
+ the final hidden states of the patch tokens) e.g. for ImageNet.
1033
+ """,
1034
+ DATA2VEC_VISION_START_DOCSTRING,
1035
+ )
1036
+ class TFData2VecVisionForImageClassification(TFData2VecVisionPreTrainedModel, TFSequenceClassificationLoss):
1037
+ def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs):
1038
+ super().__init__(config, *inputs, **kwargs)
1039
+
1040
+ self.num_labels = config.num_labels
1041
+ self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=True, name="data2vec_vision")
1042
+
1043
+ # Classifier head
1044
+ self.classifier = keras.layers.Dense(
1045
+ units=config.num_labels,
1046
+ kernel_initializer=get_initializer(config.initializer_range),
1047
+ name="classifier",
1048
+ )
1049
+ self.config = config
1050
+
1051
+ @unpack_inputs
1052
+ @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
1053
+ @add_code_sample_docstrings(
1054
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
1055
+ output_type=TFSequenceClassifierOutput,
1056
+ config_class=_CONFIG_FOR_DOC,
1057
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
1058
+ )
1059
+ def call(
1060
+ self,
1061
+ pixel_values: TFModelInputType | None = None,
1062
+ head_mask: np.ndarray | tf.Tensor | None = None,
1063
+ output_attentions: Optional[bool] = None,
1064
+ output_hidden_states: Optional[bool] = None,
1065
+ return_dict: Optional[bool] = None,
1066
+ labels: np.ndarray | tf.Tensor | None = None,
1067
+ training: Optional[bool] = False,
1068
+ ) -> Union[TFSequenceClassifierOutput, tuple]:
1069
+ r"""
1070
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1071
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1072
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1073
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1074
+ """
1075
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1076
+
1077
+ outputs = self.data2vec_vision(
1078
+ pixel_values=pixel_values,
1079
+ head_mask=head_mask,
1080
+ output_attentions=output_attentions,
1081
+ output_hidden_states=output_hidden_states,
1082
+ return_dict=return_dict,
1083
+ training=training,
1084
+ )
1085
+
1086
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
1087
+ logits = self.classifier(pooled_output)
1088
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1089
+
1090
+ if not return_dict:
1091
+ output = (logits,) + outputs[2:]
1092
+ return ((loss,) + output) if loss is not None else output
1093
+
1094
+ return TFSequenceClassifierOutput(
1095
+ loss=loss,
1096
+ logits=logits,
1097
+ hidden_states=outputs.hidden_states,
1098
+ attentions=outputs.attentions,
1099
+ )
1100
+
1101
+ def build(self, input_shape=None):
1102
+ if self.built:
1103
+ return
1104
+ self.built = True
1105
+ if getattr(self, "data2vec_vision", None) is not None:
1106
+ with tf.name_scope(self.data2vec_vision.name):
1107
+ self.data2vec_vision.build(None)
1108
+ if getattr(self, "classifier", None) is not None:
1109
+ with tf.name_scope(self.classifier.name):
1110
+ self.classifier.build([None, None, self.config.hidden_size])
1111
+
1112
+
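To see how the classification head consumes the pooled representation, here is a short sketch that builds the model from a config and runs the loss path end to end. Random weights and dummy data; `num_labels=10` is an arbitrary value for illustration, and the config attributes `num_channels` and `image_size` are assumed from the standard `Data2VecVisionConfig`:

```python
import tensorflow as tf
from transformers import Data2VecVisionConfig, TFData2VecVisionForImageClassification

config = Data2VecVisionConfig(num_labels=10)
model = TFData2VecVisionForImageClassification(config)

pixel_values = tf.random.uniform((4, config.num_channels, config.image_size, config.image_size))
labels = tf.constant([0, 3, 7, 1])

outputs = model(pixel_values, labels=labels)
print(outputs.logits.shape)  # (4, 10): one score per label, computed from the pooled output
print(outputs.loss)          # cross-entropy computed by hf_compute_loss
```

Because the main layer is created with `add_pooling_layer=True`, the pooled output fed to the classifier is, by default, the layer-normalized mean of the patch tokens rather than the `[CLS]` token, matching the class docstring above.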
1113
+ class TFData2VecVisionConvModule(keras.layers.Layer):
1114
+ """
1115
+ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
1116
+ layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
1117
+
1118
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1119
+ """
1120
+
1121
+ def __init__(
1122
+ self,
1123
+ in_channels: int,
1124
+ out_channels: int,
1125
+ kernel_size: Union[int, Tuple[int, int]],
1126
+ padding: str = "valid",
1127
+ bias: bool = False,
1128
+ dilation: Union[int, Tuple[int, int]] = 1,
1129
+ **kwargs,
1130
+ ) -> None:
1131
+ super().__init__(**kwargs)
1132
+ self.conv = keras.layers.Conv2D(
1133
+ filters=out_channels,
1134
+ kernel_size=kernel_size,
1135
+ padding=padding,
1136
+ use_bias=bias,
1137
+ dilation_rate=dilation,
1138
+ name="conv",
1139
+ )
1140
+ self.bn = keras.layers.BatchNormalization(name="bn", momentum=0.9, epsilon=1e-5)
1141
+ self.activation = tf.nn.relu
1142
+ self.in_channels = in_channels
1143
+ self.out_channels = out_channels
1144
+
1145
+ def call(self, input: tf.Tensor) -> tf.Tensor:
1146
+ output = self.conv(input)
1147
+ output = self.bn(output)
1148
+ output = self.activation(output)
1149
+ return output
1150
+
1151
+ def build(self, input_shape=None):
1152
+ if self.built:
1153
+ return
1154
+ self.built = True
1155
+ if getattr(self, "conv", None) is not None:
1156
+ with tf.name_scope(self.conv.name):
1157
+ self.conv.build([None, None, None, self.in_channels])
1158
+ if getattr(self, "bn", None) is not None:
1159
+ with tf.name_scope(self.bn.name):
1160
+ self.bn.build((None, None, None, self.out_channels))
1161
+
1162
+
1163
+ class TFAdaptiveAvgPool2D(keras.layers.Layer):
1164
+ def __init__(self, output_dims: Tuple[int, int], input_ordering: str = "NHWC", **kwargs):
1165
+ super().__init__(**kwargs)
1166
+ self.output_dims = output_dims
1167
+ self.input_ordering = input_ordering
1168
+ if input_ordering not in ("NCHW", "NHWC"):
1169
+ raise ValueError("Unrecognized input_ordering, should be 'NCHW' or 'NHWC'!")
1170
+ self.h_axis = input_ordering.index("H")
1171
+ self.w_axis = input_ordering.index("W")
1172
+
1173
+ def pseudo_1d_pool(self, inputs: tf.Tensor, h_pooling: bool):
1174
+ # Figure out which axis we're pooling on
1175
+ if h_pooling:
1176
+ axis = self.h_axis
1177
+ output_dim = self.output_dims[0]
1178
+ else:
1179
+ axis = self.w_axis
1180
+ output_dim = self.output_dims[1]
1181
+ input_dim = inputs.shape[axis]
1182
+
1183
+ # Figure out the potential pooling windows
1184
+ # This is the key idea - the torch op always uses only two
1185
+ # consecutive pooling window sizes, like 3 and 4. Therefore,
1186
+ # if we pool with both possible sizes, we simply need to gather
1187
+ # the 'correct' pool at each position to reimplement the torch op.
1188
+ small_window = math.ceil(input_dim / output_dim)
1189
+ big_window = small_window + 1
1190
+ if h_pooling:
1191
+ output_dim = self.output_dims[0]
1192
+ small_window_shape = (small_window, 1)
1193
+ big_window_shape = (big_window, 1)
1194
+ else:
1195
+ output_dim = self.output_dims[1]
1196
+ small_window_shape = (1, small_window)
1197
+ big_window_shape = (1, big_window)
1198
+
1199
+ # For resizes to 1, or integer resizes, we can take quick shortcuts
1200
+ if output_dim == input_dim:
1201
+ return inputs
1202
+ elif output_dim == 1:
1203
+ return tf.reduce_mean(inputs, axis=axis, keepdims=True)
1204
+ elif input_dim % output_dim == 0:
1205
+ return tf.nn.avg_pool2d(
1206
+ inputs,
1207
+ ksize=small_window_shape,
1208
+ strides=small_window_shape,
1209
+ padding="VALID",
1210
+ data_format=self.input_ordering,
1211
+ )
1212
+ # When upscaling by an integer factor we can also take a quick shortcut
1213
+ elif output_dim > input_dim and output_dim % input_dim == 0:
1214
+ return tf.repeat(inputs, repeats=output_dim // input_dim, axis=axis)
1215
+
1216
+ # For non-integer resizes, we pool with both possible window sizes and concatenate them
1217
+ if output_dim < input_dim:
1218
+ small_pool = tf.nn.avg_pool2d(
1219
+ inputs, ksize=small_window_shape, strides=1, padding="VALID", data_format=self.input_ordering
1220
+ )
1221
+ big_pool = tf.nn.avg_pool2d(
1222
+ inputs, ksize=big_window_shape, strides=1, padding="VALID", data_format=self.input_ordering
1223
+ )
1224
+ both_pool = tf.concat([small_pool, big_pool], axis=axis)
1225
+ else:
1226
+ # When we're actually upscaling instead, then we build the pools a bit differently
1227
+ small_pool = inputs
1228
+ big_pool = tf.nn.avg_pool2d(
1229
+ inputs, ksize=big_window_shape, strides=1, padding="VALID", data_format=self.input_ordering
1230
+ )
1231
+ both_pool = tf.concat([small_pool, big_pool], axis=axis)
1232
+
1233
+ # We compute vectors of the start and end positions for each pooling window
1234
+ # Each (start, end) pair here corresponds to a single output position
1235
+ window_starts = tf.math.floor((tf.range(output_dim, dtype=tf.float32) * input_dim) / output_dim)
1236
+ window_starts = tf.cast(window_starts, tf.int64)
1237
+ window_ends = tf.math.ceil((tf.range(1, output_dim + 1, dtype=tf.float32) * input_dim) / output_dim)
1238
+ window_ends = tf.cast(window_ends, tf.int64)
1239
+
1240
+ # pool_selector is a boolean array of shape (output_dim,) where 1 indicates that output position
1241
+ # has a big receptive field and 0 indicates that the output position has a small receptive field
1242
+ pool_selector = tf.cast(window_ends - window_starts - small_window, tf.bool)
1243
+
1244
+ # Since we concatenated the small and big pools, we need to do a bit of
1245
+ # pointer arithmetic to get the indices of the big pools
1246
+ small_indices = window_starts
1247
+ big_indices = window_starts + small_pool.shape[axis]
1248
+
1249
+ # Finally, we use the pool_selector to generate a list of indices, one per output position
1250
+ gather_indices = tf.where(pool_selector, big_indices, small_indices)
1251
+
1252
+ # Gathering from those indices yields the final, correct pooling
1253
+ return tf.gather(both_pool, gather_indices, axis=axis)
1254
+
1255
+ def call(self, inputs: tf.Tensor):
1256
+ if self.input_ordering == "NHWC":
1257
+ input_shape = inputs.shape[1:3]
1258
+ else:
1259
+ input_shape = inputs.shape[2:]
1260
+
1261
+ # We break the task down into each possible case
1262
+ # Firstly, if we're resizing down to 1, it's just tf.reduce_mean
1263
+ if self.output_dims[0] == self.output_dims[1] == 1:
1264
+ if self.input_ordering == "NHWC":
1265
+ reduce_dims = [1, 2]
1266
+ else:
1267
+ reduce_dims = [2, 3]
1268
+ return tf.reduce_mean(inputs, axis=reduce_dims, keepdims=True)
1269
+ # Secondly, if we're resizing by an integer factor on both dimensions, we can take a quick shortcut
1270
+ elif input_shape[0] % self.output_dims[0] == 0 and input_shape[1] % self.output_dims[1] == 0:
1271
+ h_resize = int(input_shape[0] // self.output_dims[0])
1272
+ w_resize = int(input_shape[1] // self.output_dims[1])
1273
+ return tf.nn.avg_pool2d(
1274
+ inputs,
1275
+ ksize=(h_resize, w_resize),
1276
+ strides=(h_resize, w_resize),
1277
+ padding="VALID",
1278
+ data_format=self.input_ordering,
1279
+ )
1280
+ else:
1281
+ # Finally, if we can't take the shortcut, we do a 1D pool on each axis. pseudo_1d_pool will take a shortcut
1282
+ # for dimensions where an integer resize is possible. It can also handle upscaling.
1283
+ h_pooled = self.pseudo_1d_pool(inputs, h_pooling=True)
1284
+ return self.pseudo_1d_pool(h_pooled, h_pooling=False)
1285
+
1286
+
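The "two consecutive window sizes" observation that `pseudo_1d_pool` relies on can be checked with plain NumPy. A small illustrative example (numbers chosen for the example, not taken from this file) for adaptively pooling a length-7 axis down to 5 outputs:

```python
import numpy as np

input_dim, output_dim = 7, 5
small_window = int(np.ceil(input_dim / output_dim))  # 2

window_starts = np.floor(np.arange(output_dim) * input_dim / output_dim).astype(int)
window_ends = np.ceil(np.arange(1, output_dim + 1) * input_dim / output_dim).astype(int)

print(window_starts)                # [0 1 2 4 5]
print(window_ends)                  # [2 3 5 6 7]
print(window_ends - window_starts)  # [2 2 3 2 2] -> only window sizes 2 and 3 ever appear
# pool_selector analogue: True where an output position needs the "big" (size-3) window
print((window_ends - window_starts) != small_window)  # [False False  True False False]
```

Pooling once with each of the two window sizes and gathering the right one per output position is therefore enough to reproduce PyTorch's adaptive average pooling, which is exactly what `TFAdaptiveAvgPool2D` does above.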
1287
+ class TFData2VecVisionPyramidPoolingModule(keras.layers.Layer):
1288
+ """
1289
+ Pyramid Pooling Module (PPM) used in PSPNet.
1290
+
1291
+ Args:
1292
+ pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
1293
+ Module.
1294
+ channels (int): Channels after modules, before conv_seg.
1295
+
1296
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1297
+ """
1298
+
1299
+ def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, out_channels: int, **kwargs) -> None:
1300
+ super().__init__(**kwargs)
1301
+ self.pool_scales = pool_scales
1302
+ self.in_channels = in_channels
1303
+ self.out_channels = out_channels
1304
+
1305
+ self.layer_list = []
1306
+ for idx, pool_scale in enumerate(pool_scales):
1307
+ pool_scale = pool_scale if isinstance(pool_scale, collections.abc.Iterable) else (pool_scale, pool_scale)
1308
+ self.layer_list.append(
1309
+ [
1310
+ TFAdaptiveAvgPool2D(output_dims=pool_scale),
1311
+ TFData2VecVisionConvModule(
1312
+ in_channels=in_channels, out_channels=self.out_channels, kernel_size=1, name=f"{idx}.1"
1313
+ ),
1314
+ ]
1315
+ )
1316
+
1317
+ def call(self, x: tf.Tensor) -> List[tf.Tensor]:
1318
+ ppm_outs = []
1319
+ inputs = x
1320
+
1321
+ for ppm in self.layer_list:
1322
+ for layer_module in ppm:
1323
+ ppm_out = layer_module(x)
1324
+ x = ppm_out
1325
+
1326
+ upsampled_ppm_out = tf.image.resize(ppm_out, size=shape_list(inputs)[1:-1], method="bilinear")
1327
+ ppm_outs.append(upsampled_ppm_out)
1328
+ return ppm_outs
1329
+
1330
+ def build(self, input_shape=None):
1331
+ for layer in self.layer_list:
1332
+ for layer_module in layer:
1333
+ with tf.name_scope(layer_module.name):
1334
+ layer_module.build(None)
1335
+
1336
+
1337
+ class TFData2VecVisionUperHead(keras.layers.Layer):
1338
+ """
1339
+ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
1340
+ [UPerNet](https://arxiv.org/abs/1807.10221).
1341
+
1342
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1343
+ """
1344
+
1345
+ def __init__(self, config: Data2VecVisionConfig, **kwargs) -> None:
1346
+ super().__init__(**kwargs)
1347
+
1348
+ self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
1349
+ self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
1350
+ self.channels = config.hidden_size
1351
+ self.classifier = keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier")
1352
+
1353
+ # PSP Module
1354
+ self.psp_modules = TFData2VecVisionPyramidPoolingModule(
1355
+ self.pool_scales, self.in_channels[-1], self.channels, name="psp_modules"
1356
+ )
1357
+ self.bottleneck = TFData2VecVisionConvModule(
1358
+ self.in_channels[-1] + len(self.pool_scales) * self.channels,
1359
+ self.channels,
1360
+ kernel_size=3,
1361
+ padding="same",
1362
+ name="bottleneck",
1363
+ )
1364
+ # FPN Module
1365
+ self.lateral_convs = []
1366
+ self.fpn_convs = []
1367
+ for idx, in_channels in enumerate(self.in_channels[:-1]): # skip the top layer
1368
+ l_conv = TFData2VecVisionConvModule(
1369
+ in_channels, out_channels=self.channels, kernel_size=1, name=f"lateral_convs.{idx}"
1370
+ )
1371
+ fpn_conv = TFData2VecVisionConvModule(
1372
+ in_channels=self.channels,
1373
+ out_channels=self.channels,
1374
+ kernel_size=3,
1375
+ padding="same",
1376
+ name=f"fpn_convs.{idx}",
1377
+ )
1378
+ self.lateral_convs.append(l_conv)
1379
+ self.fpn_convs.append(fpn_conv)
1380
+
1381
+ self.fpn_bottleneck = TFData2VecVisionConvModule(
1382
+ in_channels=len(self.in_channels) * self.channels,
1383
+ out_channels=self.channels,
1384
+ kernel_size=3,
1385
+ padding="same",
1386
+ name="fpn_bottleneck",
1387
+ )
1388
+
1389
+ def psp_forward(self, inputs):
1390
+ x = inputs[-1]
1391
+ psp_outs = [x]
1392
+ psp_outs.extend(self.psp_modules(x))
1393
+ psp_outs = tf.concat(psp_outs, axis=-1)
1394
+ output = self.bottleneck(psp_outs)
1395
+
1396
+ return output
1397
+
1398
+ def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
1399
+ # build laterals
1400
+ laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
1401
+
1402
+ laterals.append(self.psp_forward(encoder_hidden_states))
1403
+
1404
+ # build top-down path
1405
+ used_backbone_levels = len(laterals)
1406
+ for i in range(used_backbone_levels - 1, 0, -1):
1407
+ prev_shape = shape_list(laterals[i - 1])[1:-1]
1408
+ laterals[i - 1] = laterals[i - 1] + tf.image.resize(laterals[i], size=prev_shape, method="bilinear")
1409
+
1410
+ # build outputs
1411
+ fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
1412
+ # append psp feature
1413
+ fpn_outs.append(laterals[-1])
1414
+
1415
+ for i in range(used_backbone_levels - 1, 0, -1):
1416
+ fpn_outs[i] = tf.image.resize(fpn_outs[i], size=shape_list(fpn_outs[0])[1:-1], method="bilinear")
1417
+ fpn_outs = tf.concat(fpn_outs, axis=-1)
1418
+ output = self.fpn_bottleneck(fpn_outs)
1419
+ output = self.classifier(output)
1420
+
1421
+ return output
1422
+
1423
+ def build(self, input_shape=None):
1424
+ if self.built:
1425
+ return
1426
+ self.built = True
1427
+ if getattr(self, "classifier", None) is not None:
1428
+ with tf.name_scope(self.classifier.name):
1429
+ self.classifier.build([None, None, None, self.channels])
1430
+ if getattr(self, "psp_modules", None) is not None:
1431
+ with tf.name_scope(self.psp_modules.name):
1432
+ self.psp_modules.build(None)
1433
+ if getattr(self, "bottleneck", None) is not None:
1434
+ with tf.name_scope(self.bottleneck.name):
1435
+ self.bottleneck.build(None)
1436
+ if getattr(self, "fpn_bottleneck", None) is not None:
1437
+ with tf.name_scope(self.fpn_bottleneck.name):
1438
+ self.fpn_bottleneck.build(None)
1439
+ for layer in self.lateral_convs:
1440
+ with tf.name_scope(layer.name):
1441
+ layer.build(None)
1442
+ for layer in self.fpn_convs:
1443
+ with tf.name_scope(layer.name):
1444
+ layer.build(None)
1445
+
1446
+
1447
+ class TFData2VecVisionFCNHead(keras.layers.Layer):
1448
+ """
1449
+ Fully Convolution Networks for Semantic Segmentation. This head is implemented from
1450
+ [FCNNet](https://arxiv.org/abs/1411.4038).
1451
+
1452
+ Args:
1453
+ config (Data2VecVisionConfig): Configuration.
1454
+ kernel_size (int): The kernel size for convs in the head. Default: 3.
1455
+ dilation (int): The dilation rate for convs in the head. Default: 1.
1456
+
1457
+
1458
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
1459
+ """
1460
+
1461
+ def __init__(
1462
+ self,
1463
+ config: Data2VecVisionConfig,
1464
+ in_index: int = 2,
1465
+ kernel_size: int = 3,
1466
+ dilation: Union[int, Tuple[int, int]] = 1,
1467
+ **kwargs,
1468
+ ) -> None:
1469
+ super().__init__(**kwargs)
1470
+ self.in_channels = config.hidden_size
1471
+ self.channels = config.auxiliary_channels
1472
+ self.num_convs = config.auxiliary_num_convs
1473
+ self.concat_input = config.auxiliary_concat_input
1474
+ self.in_index = in_index
1475
+
1476
+ convs = []
1477
+ convs.append(
1478
+ TFData2VecVisionConvModule(
1479
+ in_channels=self.in_channels,
1480
+ out_channels=self.channels,
1481
+ kernel_size=kernel_size,
1482
+ padding="same",
1483
+ dilation=dilation,
1484
+ name="convs.0",
1485
+ )
1486
+ )
1487
+ for i in range(self.num_convs - 1):
1488
+ convs.append(
1489
+ TFData2VecVisionConvModule(
1490
+ in_channels=self.channels,
1491
+ out_channels=self.channels,
1492
+ kernel_size=kernel_size,
1493
+ padding="same",
1494
+ dilation=dilation,
1495
+ name=f"conv_module_{i+2}",
1496
+ )
1497
+ )
1498
+ if self.num_convs == 0:
1499
+ self.convs = [tf.identity]
1500
+ else:
1501
+ self.convs = convs
1502
+ if self.concat_input:
1503
+ self.conv_cat = TFData2VecVisionConvModule(
1504
+ self.in_channels + self.channels,
1505
+ out_channels=self.channels,
1506
+ kernel_size=kernel_size,
1507
+ padding="same",
1508
+ name="conv_cat",
1509
+ )
1510
+
1511
+ self.classifier = keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier")
1512
+
1513
+ def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
1514
+ # just take the relevant feature maps
1515
+ hidden_states = encoder_hidden_states[self.in_index]
1516
+ output = hidden_states
1517
+ for layer_module in self.convs:
1518
+ output = layer_module(output)
1519
+ if self.concat_input:
1520
+ output = self.conv_cat(tf.concat([hidden_states, output], axis=-1))
1521
+ output = self.classifier(output)
1522
+ return output
1523
+
1524
+ def build(self, input_shape=None):
1525
+ if self.built:
1526
+ return
1527
+ self.built = True
1528
+ if getattr(self, "classifier", None) is not None:
1529
+ with tf.name_scope(self.classifier.name):
1530
+ self.classifier.build([None, None, None, self.channels])
1531
+ if getattr(self, "conv_cat", None) is not None:
1532
+ with tf.name_scope(self.conv_cat.name):
1533
+ self.conv_cat.build(None)
1534
+
1535
+
1536
+ @add_start_docstrings(
1537
+ """
1538
+ Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
1539
+ """,
1540
+ DATA2VEC_VISION_START_DOCSTRING,
1541
+ )
1542
+ class TFData2VecVisionForSemanticSegmentation(TFData2VecVisionPreTrainedModel):
1543
+ def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs) -> None:
1544
+ super().__init__(config, *inputs, **kwargs)
1545
+ self.num_labels = config.num_labels
1546
+ self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=False, name="data2vec_vision")
1547
+
1548
+ # FPNs
1549
+ self.fpn1 = [
1550
+ keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.0"),
1551
+ keras.layers.BatchNormalization(name="fpn1.1", momentum=0.9, epsilon=1e-5),
1552
+ keras.layers.Activation("gelu"),
1553
+ keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.3"),
1554
+ ]
1555
+ self.fpn2 = [keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn2.0")]
1556
+
1557
+ self.fpn3 = tf.identity
1558
+ self.fpn4 = keras.layers.MaxPool2D(pool_size=2, strides=2)
1559
+
1560
+ # Semantic segmentation head(s)
1561
+ self.decode_head = TFData2VecVisionUperHead(config, name="decode_head")
1562
+ self.auxiliary_head = (
1563
+ TFData2VecVisionFCNHead(config, name="auxiliary_head") if config.use_auxiliary_head else None
1564
+ )
1565
+
1566
+ def compute_loss(self, logits, auxiliary_logits, labels):
1567
+ # upsample logits to the images' original size
1568
+ if len(shape_list(labels)) > 3:
1569
+ label_interp_shape = shape_list(labels)[1:-1]
1570
+ else:
1571
+ label_interp_shape = shape_list(labels)[-2:]
1572
+
1573
+ upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear")
1574
+ if auxiliary_logits is not None:
1575
+ upsampled_auxiliary_logits = tf.image.resize(auxiliary_logits, size=label_interp_shape, method="bilinear")
1576
+ # compute weighted loss
1577
+ loss_fct = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
1578
+
1579
+ # Copied from https://www.tensorflow.org/text/tutorials/transformer#loss_and_metrics.
1580
+ # Utility to mask the index to ignore during computing the loss.
1581
+ def masked_loss(real, pred):
1582
+ mask = tf.math.logical_not(tf.math.equal(real, self.config.semantic_loss_ignore_index))
1583
+ loss_ = loss_fct(real, pred)
1584
+ mask = tf.cast(mask, dtype=loss_.dtype)
1585
+ loss_ *= mask
1586
+ reduced_masked_loss = tf.reduce_sum(loss_) / tf.reduce_sum(mask)
1587
+ return tf.reshape(reduced_masked_loss, (1,))
1588
+
1589
+ main_loss = masked_loss(labels, upsampled_logits)
1590
+ auxiliary_loss = masked_loss(labels, upsampled_auxiliary_logits)
1591
+ loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
1592
+
1593
+ return loss
1594
+
1595
+ @unpack_inputs
1596
+ @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
1597
+ @replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
1598
+ def call(
1599
+ self,
1600
+ pixel_values: tf.Tensor | None = None,
1601
+ head_mask: tf.Tensor | None = None,
1602
+ labels: tf.Tensor | None = None,
1603
+ output_attentions: Optional[bool] = None,
1604
+ output_hidden_states: Optional[bool] = None,
1605
+ return_dict: Optional[bool] = None,
1606
+ ) -> Union[tuple, TFSemanticSegmenterOutput]:
1607
+ r"""
1608
+ labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*):
1609
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
1610
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
1611
+
1612
+ Returns:
1613
+
1614
+ Examples:
1615
+
1616
+ ```python
1617
+ >>> from transformers import AutoImageProcessor, TFData2VecVisionForSemanticSegmentation
1618
+ >>> from PIL import Image
1619
+ >>> import requests
1620
+
1621
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1622
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1623
+
1624
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
1625
+ >>> model = TFData2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
1626
+
1627
+ >>> inputs = image_processor(images=image, return_tensors="tf")
1628
+ >>> outputs = model(**inputs)
1629
+ >>> # logits are of shape (batch_size, num_labels, height, width)
1630
+ >>> logits = outputs.logits
1631
+ ```"""
1632
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1633
+ output_hidden_states = (
1634
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1635
+ )
1636
+
1637
+ outputs = self.data2vec_vision(
1638
+ pixel_values,
1639
+ head_mask=head_mask,
1640
+ output_attentions=output_attentions,
1641
+ output_hidden_states=True, # we need the intermediate hidden states
1642
+ return_dict=return_dict,
1643
+ )
1644
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
1645
+
1646
+ # only keep certain features, and reshape
1647
+ # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
1648
+ features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
1649
+ patch_resolution = self.config.image_size // self.config.patch_size
1650
+
1651
+ def reshape_features(x):
1652
+ # We do it this way so TF can always infer the non-batch dims at compile time
1653
+ x = tf.reshape(x, (-1, patch_resolution, patch_resolution, self.config.hidden_size))
1654
+ return x
1655
+
1656
+ features = [reshape_features(x[:, 1:, :]) for x in features]
1657
+
1658
+ # apply FPNs
1659
+ ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
1660
+ for module in ops[0]:
1661
+ features[0] = module(features[0])
1662
+ features[1] = ops[1][0](features[1])
1663
+ for i in range(len(features[2:])):
1664
+ features[i + 2] = ops[i + 2](features[i + 2])
1665
+
1666
+ logits = self.decode_head(features)
1667
+ # Transpose the logits to maintain consistency in the output formats.
1668
+ transposed_logits = tf.transpose(logits, perm=[0, 3, 1, 2])
1669
+
1670
+ auxiliary_logits = None
1671
+ if self.auxiliary_head is not None:
1672
+ auxiliary_logits = self.auxiliary_head(features)
1673
+
1674
+ loss = None
1675
+ if labels is not None:
1676
+ if self.config.num_labels == 1:
1677
+ raise ValueError("The number of labels should be greater than one")
1678
+ else:
1679
+ loss = self.compute_loss(logits, auxiliary_logits, labels)
1680
+
1681
+ if not return_dict:
1682
+ if output_hidden_states:
1683
+ output = (logits,) + outputs[1:]
1684
+ else:
1685
+ output = (logits,) + outputs[2:]
1686
+ return ((loss,) + output) if loss is not None else output
1687
+
1688
+ return TFSemanticSegmenterOutput(
1689
+ loss=loss,
1690
+ logits=transposed_logits,
1691
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
1692
+ attentions=outputs.attentions,
1693
+ )
1694
+
1695
+ def build(self, input_shape=None):
1696
+ if self.built:
1697
+ return
1698
+ self.built = True
1699
+ if getattr(self, "data2vec_vision", None) is not None:
1700
+ with tf.name_scope(self.data2vec_vision.name):
1701
+ self.data2vec_vision.build(None)
1702
+ if getattr(self, "decode_head", None) is not None:
1703
+ with tf.name_scope(self.decode_head.name):
1704
+ self.decode_head.build(None)
1705
+ if getattr(self, "auxiliary_head", None) is not None:
1706
+ with tf.name_scope(self.auxiliary_head.name):
1707
+ self.auxiliary_head.build(None)
1708
+ if getattr(self, "fpn1", None) is not None:
1709
+ with tf.name_scope(self.fpn1[0].name):
1710
+ self.fpn1[0].build([None, None, None, self.config.hidden_size])
1711
+ with tf.name_scope(self.fpn1[1].name):
1712
+ self.fpn1[1].build((None, None, None, self.config.hidden_size))
1713
+ with tf.name_scope(self.fpn1[3].name):
1714
+ self.fpn1[3].build([None, None, None, self.config.hidden_size])
1715
+ if getattr(self, "fpn2", None) is not None:
1716
+ with tf.name_scope(self.fpn2[0].name):
1717
+ self.fpn2[0].build([None, None, None, self.config.hidden_size])
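The `masked_loss` closure inside `compute_loss` averages per-pixel cross-entropy only over pixels whose label differs from `config.semantic_loss_ignore_index`. The reduction itself is easy to check in isolation; a toy sketch with made-up per-pixel losses (the ignore index of 255 is just the common convention, not read from this diff):

```python
import tensorflow as tf

ignore_index = 255

# Pretend these are per-pixel cross-entropy values for a 2x2 label map
per_pixel_loss = tf.constant([[1.0, 2.0], [4.0, 3.0]])
labels = tf.constant([[0, 1], [ignore_index, 2]])

mask = tf.cast(labels != ignore_index, per_pixel_loss.dtype)
masked_mean = tf.reduce_sum(per_pixel_loss * mask) / tf.reduce_sum(mask)
print(masked_mean.numpy())  # 2.0 -> mean of 1.0, 2.0 and 3.0; the 4.0 at the ignored pixel is dropped
```

The main and auxiliary heads each receive this masked mean, and the two terms are combined with `config.auxiliary_loss_weight`.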
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__init__.py ADDED
@@ -0,0 +1,120 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
28
+ "tokenization_deberta": ["DebertaTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_deberta"] = [
46
+ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "DebertaForMaskedLM",
48
+ "DebertaForQuestionAnswering",
49
+ "DebertaForSequenceClassification",
50
+ "DebertaForTokenClassification",
51
+ "DebertaModel",
52
+ "DebertaPreTrainedModel",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_deberta"] = [
62
+ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFDebertaForMaskedLM",
64
+ "TFDebertaForQuestionAnswering",
65
+ "TFDebertaForSequenceClassification",
66
+ "TFDebertaForTokenClassification",
67
+ "TFDebertaModel",
68
+ "TFDebertaPreTrainedModel",
69
+ ]
70
+
71
+
72
+ if TYPE_CHECKING:
73
+ from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
74
+ from .tokenization_deberta import DebertaTokenizer
75
+
76
+ try:
77
+ if not is_tokenizers_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .tokenization_deberta_fast import DebertaTokenizerFast
83
+
84
+ try:
85
+ if not is_torch_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .modeling_deberta import (
91
+ DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
92
+ DebertaForMaskedLM,
93
+ DebertaForQuestionAnswering,
94
+ DebertaForSequenceClassification,
95
+ DebertaForTokenClassification,
96
+ DebertaModel,
97
+ DebertaPreTrainedModel,
98
+ )
99
+
100
+ try:
101
+ if not is_tf_available():
102
+ raise OptionalDependencyNotAvailable()
103
+ except OptionalDependencyNotAvailable:
104
+ pass
105
+ else:
106
+ from .modeling_tf_deberta import (
107
+ TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
108
+ TFDebertaForMaskedLM,
109
+ TFDebertaForQuestionAnswering,
110
+ TFDebertaForSequenceClassification,
111
+ TFDebertaForTokenClassification,
112
+ TFDebertaModel,
113
+ TFDebertaPreTrainedModel,
114
+ )
115
+
116
+
117
+ else:
118
+ import sys
119
+
120
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
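Because the module object is swapped for a `_LazyModule`, everything registered in `_import_structure` is only imported on first attribute access. A minimal sketch of the public entry points this file exposes (assumes `transformers` with PyTorch installed; the model below is randomly initialized, not pretrained):

```python
from transformers.models.deberta import DebertaConfig, DebertaModel  # resolved lazily on access

config = DebertaConfig()         # defaults roughly mirror microsoft/deberta-base
model = DebertaModel(config)     # random weights; use DebertaModel.from_pretrained(...) for a checkpoint
print(model.config.hidden_size)  # 768
```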
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.82 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc ADDED
Binary file (7.95 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc ADDED
Binary file (42.7 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc ADDED
Binary file (51.3 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc ADDED
Binary file (15.2 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc ADDED
Binary file (9.38 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py ADDED
@@ -0,0 +1,193 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DeBERTa model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ if TYPE_CHECKING:
25
+ from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class DebertaConfig(PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
37
+ used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
38
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
39
+ [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Arguments:
45
+ vocab_size (`int`, *optional*, defaults to 30522):
46
+ Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
47
+ `inputs_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
48
+ hidden_size (`int`, *optional*, defaults to 768):
49
+ Dimensionality of the encoder layers and the pooler layer.
50
+ num_hidden_layers (`int`, *optional*, defaults to 12):
51
+ Number of hidden layers in the Transformer encoder.
52
+ num_attention_heads (`int`, *optional*, defaults to 12):
53
+ Number of attention heads for each attention layer in the Transformer encoder.
54
+ intermediate_size (`int`, *optional*, defaults to 3072):
55
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
56
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
57
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
58
+ `"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
59
+ are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ max_position_embeddings (`int`, *optional*, defaults to 512):
65
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
66
+ just in case (e.g., 512 or 1024 or 2048).
67
+ type_vocab_size (`int`, *optional*, defaults to 2):
68
+ The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
72
+ The epsilon used by the layer normalization layers.
73
+ relative_attention (`bool`, *optional*, defaults to `False`):
74
+ Whether use relative position encoding.
75
+ max_relative_positions (`int`, *optional*, defaults to 1):
76
+ The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
77
+ as `max_position_embeddings`.
78
+ pad_token_id (`int`, *optional*, defaults to 0):
79
+ The value used to pad input_ids.
80
+ position_biased_input (`bool`, *optional*, defaults to `True`):
81
+ Whether add absolute position embedding to content embedding.
82
+ pos_att_type (`List[str]`, *optional*):
83
+ The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
84
+ `["p2c", "c2p"]`.
85
+ layer_norm_eps (`float`, optional, defaults to 1e-12):
86
+ The epsilon used by the layer normalization layers.
87
+
88
+ Example:
89
+
90
+ ```python
91
+ >>> from transformers import DebertaConfig, DebertaModel
92
+
93
+ >>> # Initializing a DeBERTa microsoft/deberta-base style configuration
94
+ >>> configuration = DebertaConfig()
95
+
96
+ >>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
97
+ >>> model = DebertaModel(configuration)
98
+
99
+ >>> # Accessing the model configuration
100
+ >>> configuration = model.config
101
+ ```"""
102
+
103
+ model_type = "deberta"
104
+
105
+ def __init__(
106
+ self,
107
+ vocab_size=50265,
108
+ hidden_size=768,
109
+ num_hidden_layers=12,
110
+ num_attention_heads=12,
111
+ intermediate_size=3072,
112
+ hidden_act="gelu",
113
+ hidden_dropout_prob=0.1,
114
+ attention_probs_dropout_prob=0.1,
115
+ max_position_embeddings=512,
116
+ type_vocab_size=0,
117
+ initializer_range=0.02,
118
+ layer_norm_eps=1e-7,
119
+ relative_attention=False,
120
+ max_relative_positions=-1,
121
+ pad_token_id=0,
122
+ position_biased_input=True,
123
+ pos_att_type=None,
124
+ pooler_dropout=0,
125
+ pooler_hidden_act="gelu",
126
+ **kwargs,
127
+ ):
128
+ super().__init__(**kwargs)
129
+
130
+ self.hidden_size = hidden_size
131
+ self.num_hidden_layers = num_hidden_layers
132
+ self.num_attention_heads = num_attention_heads
133
+ self.intermediate_size = intermediate_size
134
+ self.hidden_act = hidden_act
135
+ self.hidden_dropout_prob = hidden_dropout_prob
136
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.type_vocab_size = type_vocab_size
139
+ self.initializer_range = initializer_range
140
+ self.relative_attention = relative_attention
141
+ self.max_relative_positions = max_relative_positions
142
+ self.pad_token_id = pad_token_id
143
+ self.position_biased_input = position_biased_input
144
+
145
+ # Backwards compatibility
146
+ if isinstance(pos_att_type, str):
147
+ pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
148
+
149
+ self.pos_att_type = pos_att_type
150
+ self.vocab_size = vocab_size
151
+ self.layer_norm_eps = layer_norm_eps
152
+
153
+ self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
154
+ self.pooler_dropout = pooler_dropout
155
+ self.pooler_hidden_act = pooler_hidden_act
156
+
157
+
158
+ # Copied from transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig
159
+ class DebertaOnnxConfig(OnnxConfig):
160
+ @property
161
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
162
+ if self.task == "multiple-choice":
163
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
164
+ else:
165
+ dynamic_axis = {0: "batch", 1: "sequence"}
166
+ if self._config.type_vocab_size > 0:
167
+ return OrderedDict(
168
+ [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
169
+ )
170
+ else:
171
+ return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
172
+
173
+ @property
174
+ def default_onnx_opset(self) -> int:
175
+ return 12
176
+
177
+ def generate_dummy_inputs(
178
+ self,
179
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
180
+ batch_size: int = -1,
181
+ seq_length: int = -1,
182
+ num_choices: int = -1,
183
+ is_pair: bool = False,
184
+ framework: Optional["TensorType"] = None,
185
+ num_channels: int = 3,
186
+ image_width: int = 40,
187
+ image_height: int = 40,
188
+ tokenizer: "PreTrainedTokenizerBase" = None,
189
+ ) -> Mapping[str, Any]:
190
+ dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
191
+ if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
192
+ del dummy_inputs["token_type_ids"]
193
+ return dummy_inputs
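Since `DebertaOnnxConfig.inputs` keys off `type_vocab_size`, the ONNX export drops `token_type_ids` for the default configuration (which sets `type_vocab_size=0`). A quick check, assuming only the classes defined in this file plus the base `OnnxConfig` constructor:

```python
from transformers import DebertaConfig
from transformers.models.deberta.configuration_deberta import DebertaOnnxConfig

onnx_config = DebertaOnnxConfig(DebertaConfig())  # default task, type_vocab_size == 0
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}
print(onnx_config.default_onnx_opset)  # 12

onnx_config_tt = DebertaOnnxConfig(DebertaConfig(type_vocab_size=2))
print("token_type_ids" in onnx_config_tt.inputs)  # True
```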
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py ADDED
@@ -0,0 +1,1426 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the Hugging Face Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DeBERTa model."""
16
+
17
+ from collections.abc import Sequence
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ MaskedLMOutput,
29
+ QuestionAnsweringModelOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import softmax_backward_data
35
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
36
+ from .configuration_deberta import DebertaConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+ _CONFIG_FOR_DOC = "DebertaConfig"
41
+ _CHECKPOINT_FOR_DOC = "microsoft/deberta-base"
42
+
43
+ # Masked LM docstring
44
+ _CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback"
45
+ _MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
46
+ _MASKED_LM_EXPECTED_LOSS = "0.54"
47
+
48
+ # QuestionAnswering docstring
49
+ _CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad"
50
+ _QA_EXPECTED_OUTPUT = "' a nice puppet'"
51
+ _QA_EXPECTED_LOSS = 0.14
52
+ _QA_TARGET_START_INDEX = 12
53
+ _QA_TARGET_END_INDEX = 14
54
+
55
+
56
+ from ..deprecated._archive_maps import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ class ContextPooler(nn.Module):
60
+ def __init__(self, config):
61
+ super().__init__()
62
+ self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
63
+ self.dropout = StableDropout(config.pooler_dropout)
64
+ self.config = config
65
+
66
+ def forward(self, hidden_states):
67
+ # We "pool" the model by simply taking the hidden state corresponding
68
+ # to the first token.
69
+
70
+ context_token = hidden_states[:, 0]
71
+ context_token = self.dropout(context_token)
72
+ pooled_output = self.dense(context_token)
73
+ pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
74
+ return pooled_output
75
+
76
+ @property
77
+ def output_dim(self):
78
+ return self.config.hidden_size
79
+
80
+
81
+ class XSoftmax(torch.autograd.Function):
82
+ """
83
+ Masked Softmax which is optimized for saving memory
84
+
85
+ Args:
86
+ input (`torch.tensor`): The input tensor that will apply softmax.
87
+ mask (`torch.IntTensor`):
88
+ The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
89
+ dim (int): The dimension along which to apply the softmax
90
+
91
+ Example:
92
+
93
+ ```python
94
+ >>> import torch
95
+ >>> from transformers.models.deberta.modeling_deberta import XSoftmax
96
+
97
+ >>> # Make a tensor
98
+ >>> x = torch.randn([4, 20, 100])
99
+
100
+ >>> # Create a mask
101
+ >>> mask = (x > 0).int()
102
+
103
+ >>> # Specify the dimension to apply softmax
104
+ >>> dim = -1
105
+
106
+ >>> y = XSoftmax.apply(x, mask, dim)
107
+ ```"""
108
+
109
+ @staticmethod
110
+ def forward(self, input, mask, dim):
111
+ self.dim = dim
112
+ rmask = ~(mask.to(torch.bool))
113
+
114
+ output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
115
+ output = torch.softmax(output, self.dim)
116
+ output.masked_fill_(rmask, 0)
117
+ self.save_for_backward(output)
118
+ return output
119
+
120
+ @staticmethod
121
+ def backward(self, grad_output):
122
+ (output,) = self.saved_tensors
123
+ inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
124
+ return inputGrad, None, None
125
+
126
+ @staticmethod
127
+ def symbolic(g, self, mask, dim):
128
+ import torch.onnx.symbolic_helper as sym_help
129
+ from torch.onnx.symbolic_opset9 import masked_fill, softmax
130
+
131
+ mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
132
+ r_mask = g.op(
133
+ "Cast",
134
+ g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
135
+ to_i=sym_help.cast_pytorch_to_onnx["Bool"],
136
+ )
137
+ output = masked_fill(
138
+ g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
139
+ )
140
+ output = softmax(g, output, dim)
141
+ return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
142
+
143
+
144
+ class DropoutContext(object):
145
+ def __init__(self):
146
+ self.dropout = 0
147
+ self.mask = None
148
+ self.scale = 1
149
+ self.reuse_mask = True
150
+
151
+
152
+ def get_mask(input, local_context):
153
+ if not isinstance(local_context, DropoutContext):
154
+ dropout = local_context
155
+ mask = None
156
+ else:
157
+ dropout = local_context.dropout
158
+ dropout *= local_context.scale
159
+ mask = local_context.mask if local_context.reuse_mask else None
160
+
161
+ if dropout > 0 and mask is None:
162
+ mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
163
+
164
+ if isinstance(local_context, DropoutContext):
165
+ if local_context.mask is None:
166
+ local_context.mask = mask
167
+
168
+ return mask, dropout
169
+
170
+
171
+ class XDropout(torch.autograd.Function):
172
+ """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
173
+
174
+ @staticmethod
175
+ def forward(ctx, input, local_ctx):
176
+ mask, dropout = get_mask(input, local_ctx)
177
+ ctx.scale = 1.0 / (1 - dropout)
178
+ if dropout > 0:
179
+ ctx.save_for_backward(mask)
180
+ return input.masked_fill(mask, 0) * ctx.scale
181
+ else:
182
+ return input
183
+
184
+ @staticmethod
185
+ def backward(ctx, grad_output):
186
+ if ctx.scale > 1:
187
+ (mask,) = ctx.saved_tensors
188
+ return grad_output.masked_fill(mask, 0) * ctx.scale, None
189
+ else:
190
+ return grad_output, None
191
+
192
+ @staticmethod
193
+ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
194
+ from torch.onnx import symbolic_opset12
195
+
196
+ dropout_p = local_ctx
197
+ if isinstance(local_ctx, DropoutContext):
198
+ dropout_p = local_ctx.dropout
199
+ # StableDropout only calls this function when training.
200
+ train = True
201
+ # TODO: We should check if the opset_version being used to export
202
+ # is > 12 here, but there's no good way to do that. As-is, if the
203
+ # opset_version < 12, export will fail with a CheckerError.
204
+ # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
205
+ # if opset_version < 12:
206
+ # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
207
+ return symbolic_opset12.dropout(g, input, dropout_p, train)
208
+
209
+
210
+ class StableDropout(nn.Module):
211
+ """
212
+ Optimized dropout module for stabilizing the training
213
+
214
+ Args:
215
+ drop_prob (float): the dropout probability
216
+ """
217
+
218
+ def __init__(self, drop_prob):
219
+ super().__init__()
220
+ self.drop_prob = drop_prob
221
+ self.count = 0
222
+ self.context_stack = None
223
+
224
+ def forward(self, x):
225
+ """
226
+ Call the module
227
+
228
+ Args:
229
+ x (`torch.tensor`): The input tensor to which dropout is applied
230
+ """
231
+ if self.training and self.drop_prob > 0:
232
+ return XDropout.apply(x, self.get_context())
233
+ return x
234
+
235
+ def clear_context(self):
236
+ self.count = 0
237
+ self.context_stack = None
238
+
239
+ def init_context(self, reuse_mask=True, scale=1):
240
+ if self.context_stack is None:
241
+ self.context_stack = []
242
+ self.count = 0
243
+ for c in self.context_stack:
244
+ c.reuse_mask = reuse_mask
245
+ c.scale = scale
246
+
247
+ def get_context(self):
248
+ if self.context_stack is not None:
249
+ if self.count >= len(self.context_stack):
250
+ self.context_stack.append(DropoutContext())
251
+ ctx = self.context_stack[self.count]
252
+ ctx.dropout = self.drop_prob
253
+ self.count += 1
254
+ return ctx
255
+ else:
256
+ return self.drop_prob
257
+
258
+
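+ # A minimal usage sketch for StableDropout (illustrative only): at training time it drives XDropout,
+ # which stores a boolean mask and rescales the kept activations by 1 / (1 - drop_prob); outside of
+ # training it is the identity.
+ # >>> drop = StableDropout(0.1)
+ # >>> _ = drop.train()
+ # >>> y = drop(torch.randn(2, 4, 8))        # masked and rescaled by 1 / 0.9
+ # >>> _ = drop.eval()
+ # >>> torch.equal(drop(y), y)
+ # True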
259
+ class DebertaLayerNorm(nn.Module):
260
+ """LayerNorm module in the TF style (epsilon inside the square root)."""
261
+
262
+ def __init__(self, size, eps=1e-12):
263
+ super().__init__()
264
+ self.weight = nn.Parameter(torch.ones(size))
265
+ self.bias = nn.Parameter(torch.zeros(size))
266
+ self.variance_epsilon = eps
267
+
268
+ def forward(self, hidden_states):
269
+ input_type = hidden_states.dtype
270
+ hidden_states = hidden_states.float()
271
+ mean = hidden_states.mean(-1, keepdim=True)
272
+ variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
273
+ hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
274
+ hidden_states = hidden_states.to(input_type)
275
+ y = self.weight * hidden_states + self.bias
276
+ return y
277
+
278
+
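+ # A small sketch of DebertaLayerNorm's behaviour (illustrative only): statistics are computed in
+ # float32 for numerical stability, with epsilon inside the square root, i.e.
+ #   y = weight * (x - mean(x)) / sqrt(var(x) + eps) + bias
+ # >>> ln = DebertaLayerNorm(8)
+ # >>> y = ln(torch.randn(2, 3, 8))
+ # >>> y.mean(-1).abs().max() < 1e-5          # each feature vector is re-centred around 0
+ # tensor(True)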
279
+ class DebertaSelfOutput(nn.Module):
280
+ def __init__(self, config):
281
+ super().__init__()
282
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
283
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
284
+ self.dropout = StableDropout(config.hidden_dropout_prob)
285
+
286
+ def forward(self, hidden_states, input_tensor):
287
+ hidden_states = self.dense(hidden_states)
288
+ hidden_states = self.dropout(hidden_states)
289
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
290
+ return hidden_states
291
+
292
+
293
+ class DebertaAttention(nn.Module):
294
+ def __init__(self, config):
295
+ super().__init__()
296
+ self.self = DisentangledSelfAttention(config)
297
+ self.output = DebertaSelfOutput(config)
298
+ self.config = config
299
+
300
+ def forward(
301
+ self,
302
+ hidden_states,
303
+ attention_mask,
304
+ output_attentions=False,
305
+ query_states=None,
306
+ relative_pos=None,
307
+ rel_embeddings=None,
308
+ ):
309
+ self_output = self.self(
310
+ hidden_states,
311
+ attention_mask,
312
+ output_attentions,
313
+ query_states=query_states,
314
+ relative_pos=relative_pos,
315
+ rel_embeddings=rel_embeddings,
316
+ )
317
+ if output_attentions:
318
+ self_output, att_matrix = self_output
319
+ if query_states is None:
320
+ query_states = hidden_states
321
+ attention_output = self.output(self_output, query_states)
322
+
323
+ if output_attentions:
324
+ return (attention_output, att_matrix)
325
+ else:
326
+ return attention_output
327
+
328
+
329
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta
330
+ class DebertaIntermediate(nn.Module):
331
+ def __init__(self, config):
332
+ super().__init__()
333
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
334
+ if isinstance(config.hidden_act, str):
335
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
336
+ else:
337
+ self.intermediate_act_fn = config.hidden_act
338
+
339
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
340
+ hidden_states = self.dense(hidden_states)
341
+ hidden_states = self.intermediate_act_fn(hidden_states)
342
+ return hidden_states
343
+
344
+
345
+ class DebertaOutput(nn.Module):
346
+ def __init__(self, config):
347
+ super().__init__()
348
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
349
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
350
+ self.dropout = StableDropout(config.hidden_dropout_prob)
351
+ self.config = config
352
+
353
+ def forward(self, hidden_states, input_tensor):
354
+ hidden_states = self.dense(hidden_states)
355
+ hidden_states = self.dropout(hidden_states)
356
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
357
+ return hidden_states
358
+
359
+
360
+ class DebertaLayer(nn.Module):
361
+ def __init__(self, config):
362
+ super().__init__()
363
+ self.attention = DebertaAttention(config)
364
+ self.intermediate = DebertaIntermediate(config)
365
+ self.output = DebertaOutput(config)
366
+
367
+ def forward(
368
+ self,
369
+ hidden_states,
370
+ attention_mask,
371
+ query_states=None,
372
+ relative_pos=None,
373
+ rel_embeddings=None,
374
+ output_attentions=False,
375
+ ):
376
+ attention_output = self.attention(
377
+ hidden_states,
378
+ attention_mask,
379
+ output_attentions=output_attentions,
380
+ query_states=query_states,
381
+ relative_pos=relative_pos,
382
+ rel_embeddings=rel_embeddings,
383
+ )
384
+ if output_attentions:
385
+ attention_output, att_matrix = attention_output
386
+ intermediate_output = self.intermediate(attention_output)
387
+ layer_output = self.output(intermediate_output, attention_output)
388
+ if output_attentions:
389
+ return (layer_output, att_matrix)
390
+ else:
391
+ return layer_output
392
+
393
+
394
+ class DebertaEncoder(nn.Module):
395
+ """Modified BertEncoder with relative position bias support"""
396
+
397
+ def __init__(self, config):
398
+ super().__init__()
399
+ self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
400
+ self.relative_attention = getattr(config, "relative_attention", False)
401
+ if self.relative_attention:
402
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
403
+ if self.max_relative_positions < 1:
404
+ self.max_relative_positions = config.max_position_embeddings
405
+ self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
406
+ self.gradient_checkpointing = False
407
+
408
+ def get_rel_embedding(self):
409
+ rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
410
+ return rel_embeddings
411
+
412
+ def get_attention_mask(self, attention_mask):
413
+ if attention_mask.dim() <= 2:
414
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
415
+ attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
416
+ elif attention_mask.dim() == 3:
417
+ attention_mask = attention_mask.unsqueeze(1)
418
+
419
+ return attention_mask
420
+
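+ # A shape sketch for get_attention_mask (illustrative only): a 2D padding mask [batch, seq] becomes a
+ # 4D pairwise mask [batch, 1, seq, seq] where entry [b, 0, i, j] is 1 only if tokens i and j are both real.
+ # >>> m = torch.tensor([[1, 1, 0]])
+ # >>> e = m.unsqueeze(1).unsqueeze(2)                    # [1, 1, 1, 3]
+ # >>> e * e.squeeze(-2).unsqueeze(-1)                    # [1, 1, 3, 3]
+ # tensor([[[[1, 1, 0],
+ #           [1, 1, 0],
+ #           [0, 0, 0]]]])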
421
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
422
+ if self.relative_attention and relative_pos is None:
423
+ q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
424
+ relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
425
+ return relative_pos
426
+
427
+ def forward(
428
+ self,
429
+ hidden_states,
430
+ attention_mask,
431
+ output_hidden_states=True,
432
+ output_attentions=False,
433
+ query_states=None,
434
+ relative_pos=None,
435
+ return_dict=True,
436
+ ):
437
+ attention_mask = self.get_attention_mask(attention_mask)
438
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
439
+
440
+ all_hidden_states = () if output_hidden_states else None
441
+ all_attentions = () if output_attentions else None
442
+
443
+ if isinstance(hidden_states, Sequence):
444
+ next_kv = hidden_states[0]
445
+ else:
446
+ next_kv = hidden_states
447
+ rel_embeddings = self.get_rel_embedding()
448
+ for i, layer_module in enumerate(self.layer):
449
+ if output_hidden_states:
450
+ all_hidden_states = all_hidden_states + (hidden_states,)
451
+
452
+ if self.gradient_checkpointing and self.training:
453
+ hidden_states = self._gradient_checkpointing_func(
454
+ layer_module.__call__,
455
+ next_kv,
456
+ attention_mask,
457
+ query_states,
458
+ relative_pos,
459
+ rel_embeddings,
460
+ output_attentions,
461
+ )
462
+ else:
463
+ hidden_states = layer_module(
464
+ next_kv,
465
+ attention_mask,
466
+ query_states=query_states,
467
+ relative_pos=relative_pos,
468
+ rel_embeddings=rel_embeddings,
469
+ output_attentions=output_attentions,
470
+ )
471
+
472
+ if output_attentions:
473
+ hidden_states, att_m = hidden_states
474
+
475
+ if query_states is not None:
476
+ query_states = hidden_states
477
+ if isinstance(hidden_states, Sequence):
478
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
479
+ else:
480
+ next_kv = hidden_states
481
+
482
+ if output_attentions:
483
+ all_attentions = all_attentions + (att_m,)
484
+
485
+ if output_hidden_states:
486
+ all_hidden_states = all_hidden_states + (hidden_states,)
487
+
488
+ if not return_dict:
489
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
490
+ return BaseModelOutput(
491
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
492
+ )
493
+
494
+
495
+ def build_relative_position(query_size, key_size, device):
496
+ """
497
+ Build relative position according to the query and key
498
+
499
+ We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of
+ the key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is
+ \\(R_{q \\rightarrow k} = P_q - P_k\\).
502
+
503
+ Args:
504
+ query_size (int): the length of query
505
+ key_size (int): the length of key
506
+
507
+ Return:
508
+ `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
509
+
510
+ """
511
+
512
+ q_ids = torch.arange(query_size, dtype=torch.long, device=device)
513
+ k_ids = torch.arange(key_size, dtype=torch.long, device=device)
514
+ rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
515
+ rel_pos_ids = rel_pos_ids[:query_size, :]
516
+ rel_pos_ids = rel_pos_ids.unsqueeze(0)
517
+ return rel_pos_ids
518
+
519
+
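+ # A worked example for build_relative_position (illustrative only): entry [0, i, j] is the signed
+ # distance i - j between query position i and key position j.
+ # >>> build_relative_position(3, 3, torch.device("cpu"))
+ # tensor([[[ 0, -1, -2],
+ #          [ 1,  0, -1],
+ #          [ 2,  1,  0]]])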
520
+ @torch.jit.script
521
+ def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
522
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
523
+
524
+
525
+ @torch.jit.script
526
+ def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
527
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
528
+
529
+
530
+ @torch.jit.script
531
+ def pos_dynamic_expand(pos_index, p2c_att, key_layer):
532
+ return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
533
+
534
+
535
+ class DisentangledSelfAttention(nn.Module):
536
+ """
537
+ Disentangled self-attention module
538
+
539
+ Parameters:
540
+ config (`DebertaConfig`):
+ A model config class instance with the configuration to build a new model. The schema is similar to
+ *BertConfig*; for more details, please refer to [`DebertaConfig`].
543
+
544
+ """
545
+
546
+ def __init__(self, config):
547
+ super().__init__()
548
+ if config.hidden_size % config.num_attention_heads != 0:
549
+ raise ValueError(
550
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
551
+ f"heads ({config.num_attention_heads})"
552
+ )
553
+ self.num_attention_heads = config.num_attention_heads
554
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
555
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
556
+ self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
557
+ self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
558
+ self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
559
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
560
+
561
+ self.relative_attention = getattr(config, "relative_attention", False)
562
+ self.talking_head = getattr(config, "talking_head", False)
563
+
564
+ if self.talking_head:
565
+ self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
566
+ self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
567
+
568
+ if self.relative_attention:
569
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
570
+ if self.max_relative_positions < 1:
571
+ self.max_relative_positions = config.max_position_embeddings
572
+ self.pos_dropout = StableDropout(config.hidden_dropout_prob)
573
+
574
+ if "c2p" in self.pos_att_type:
575
+ self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
576
+ if "p2c" in self.pos_att_type:
577
+ self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
578
+
579
+ self.dropout = StableDropout(config.attention_probs_dropout_prob)
580
+
581
+ def transpose_for_scores(self, x):
582
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
583
+ x = x.view(new_x_shape)
584
+ return x.permute(0, 2, 1, 3)
585
+
586
+ def forward(
587
+ self,
588
+ hidden_states,
589
+ attention_mask,
590
+ output_attentions=False,
591
+ query_states=None,
592
+ relative_pos=None,
593
+ rel_embeddings=None,
594
+ ):
595
+ """
596
+ Call the module
597
+
598
+ Args:
599
+ hidden_states (`torch.FloatTensor`):
600
+ Input states to the module, usually the output from the previous layer; they are used as the Q, K and V in
+ *Attention(Q,K,V)*
602
+
603
+ attention_mask (`torch.BoolTensor`):
604
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size and *N* is the maximum
+ sequence length, in which element [i,j] = *1* means the *i*-th token in the input can attend to the *j*-th
+ token.
607
+
608
+ output_attentions (`bool`, optional):
609
+ Whether to return the attention matrix.
610
+
611
+ query_states (`torch.FloatTensor`, optional):
612
+ The *Q* state in *Attention(Q,K,V)*.
613
+
614
+ relative_pos (`torch.LongTensor`):
615
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
616
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
617
+
618
+ rel_embeddings (`torch.FloatTensor`):
619
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
620
+ \\text{max_relative_positions}\\), *hidden_size*].
621
+
622
+
623
+ """
624
+ if query_states is None:
625
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
626
+ query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
627
+ else:
628
+
629
+ def linear(w, b, x):
630
+ if b is not None:
631
+ return torch.matmul(x, w.t()) + b.t()
632
+ else:
633
+ return torch.matmul(x, w.t()) # + b.t()
634
+
635
+ ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
636
+ qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
637
+ qkvb = [None] * 3
638
+
639
+ q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype))
640
+ k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)]
641
+ query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]
642
+
643
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
644
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
645
+
646
+ rel_att = None
647
+ # Take the dot product between "query" and "key" to get the raw attention scores.
648
+ scale_factor = 1 + len(self.pos_att_type)
649
+ scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
650
+ query_layer = query_layer / scale.to(dtype=query_layer.dtype)
651
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
652
+ if self.relative_attention:
653
+ rel_embeddings = self.pos_dropout(rel_embeddings)
654
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
655
+
656
+ if rel_att is not None:
657
+ attention_scores = attention_scores + rel_att
658
+
659
+ # bxhxlxd
660
+ if self.talking_head:
661
+ attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
662
+
663
+ attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
664
+ attention_probs = self.dropout(attention_probs)
665
+ if self.talking_head:
666
+ attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
667
+
668
+ context_layer = torch.matmul(attention_probs, value_layer)
669
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
670
+ new_context_layer_shape = context_layer.size()[:-2] + (-1,)
671
+ context_layer = context_layer.view(new_context_layer_shape)
672
+ if output_attentions:
673
+ return (context_layer, attention_probs)
674
+ else:
675
+ return context_layer
676
+
677
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
678
+ if relative_pos is None:
679
+ q = query_layer.size(-2)
680
+ relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
681
+ if relative_pos.dim() == 2:
682
+ relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
683
+ elif relative_pos.dim() == 3:
684
+ relative_pos = relative_pos.unsqueeze(1)
685
+ # bxhxqxk
686
+ elif relative_pos.dim() != 4:
687
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
688
+
689
+ att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
690
+ relative_pos = relative_pos.long().to(query_layer.device)
691
+ rel_embeddings = rel_embeddings[
692
+ self.max_relative_positions - att_span : self.max_relative_positions + att_span, :
693
+ ].unsqueeze(0)
694
+
695
+ score = 0
696
+
697
+ # content->position
698
+ if "c2p" in self.pos_att_type:
699
+ pos_key_layer = self.pos_proj(rel_embeddings)
700
+ pos_key_layer = self.transpose_for_scores(pos_key_layer)
701
+ c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
702
+ c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
703
+ c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
704
+ score += c2p_att
705
+
706
+ # position->content
707
+ if "p2c" in self.pos_att_type:
708
+ pos_query_layer = self.pos_q_proj(rel_embeddings)
709
+ pos_query_layer = self.transpose_for_scores(pos_query_layer)
710
+ pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
711
+ if query_layer.size(-2) != key_layer.size(-2):
712
+ r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
713
+ else:
714
+ r_pos = relative_pos
715
+ p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
716
+ p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
717
+ p2c_att = torch.gather(
718
+ p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)
719
+ ).transpose(-1, -2)
720
+
721
+ if query_layer.size(-2) != key_layer.size(-2):
722
+ pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
723
+ p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
724
+ score += p2c_att
725
+
726
+ return score
727
+
728
+
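+ # An index sketch for disentangled_att_bias (illustrative only): relative distances are shifted by
+ # att_span and clamped into [0, 2 * att_span - 1] before being used as gather indices over the
+ # relative-position scores.
+ # >>> att_span = 4
+ # >>> rel = torch.tensor([-6, -1, 0, 1, 6])
+ # >>> torch.clamp(rel + att_span, 0, att_span * 2 - 1)
+ # tensor([0, 3, 4, 5, 7])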
729
+ class DebertaEmbeddings(nn.Module):
730
+ """Construct the embeddings from word, position and token_type embeddings."""
731
+
732
+ def __init__(self, config):
733
+ super().__init__()
734
+ pad_token_id = getattr(config, "pad_token_id", 0)
735
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
736
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
737
+
738
+ self.position_biased_input = getattr(config, "position_biased_input", True)
739
+ if not self.position_biased_input:
740
+ self.position_embeddings = None
741
+ else:
742
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
743
+
744
+ if config.type_vocab_size > 0:
745
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
746
+
747
+ if self.embedding_size != config.hidden_size:
748
+ self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
749
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
750
+ self.dropout = StableDropout(config.hidden_dropout_prob)
751
+ self.config = config
752
+
753
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
754
+ self.register_buffer(
755
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
756
+ )
757
+
758
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
759
+ if input_ids is not None:
760
+ input_shape = input_ids.size()
761
+ else:
762
+ input_shape = inputs_embeds.size()[:-1]
763
+
764
+ seq_length = input_shape[1]
765
+
766
+ if position_ids is None:
767
+ position_ids = self.position_ids[:, :seq_length]
768
+
769
+ if token_type_ids is None:
770
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
771
+
772
+ if inputs_embeds is None:
773
+ inputs_embeds = self.word_embeddings(input_ids)
774
+
775
+ if self.position_embeddings is not None:
776
+ position_embeddings = self.position_embeddings(position_ids.long())
777
+ else:
778
+ position_embeddings = torch.zeros_like(inputs_embeds)
779
+
780
+ embeddings = inputs_embeds
781
+ if self.position_biased_input:
782
+ embeddings += position_embeddings
783
+ if self.config.type_vocab_size > 0:
784
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
785
+ embeddings += token_type_embeddings
786
+
787
+ if self.embedding_size != self.config.hidden_size:
788
+ embeddings = self.embed_proj(embeddings)
789
+
790
+ embeddings = self.LayerNorm(embeddings)
791
+
792
+ if mask is not None:
793
+ if mask.dim() != embeddings.dim():
794
+ if mask.dim() == 4:
795
+ mask = mask.squeeze(1).squeeze(1)
796
+ mask = mask.unsqueeze(2)
797
+ mask = mask.to(embeddings.dtype)
798
+
799
+ embeddings = embeddings * mask
800
+
801
+ embeddings = self.dropout(embeddings)
802
+ return embeddings
803
+
804
+
805
+ class DebertaPreTrainedModel(PreTrainedModel):
806
+ """
807
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
808
+ models.
809
+ """
810
+
811
+ config_class = DebertaConfig
812
+ base_model_prefix = "deberta"
813
+ _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
814
+ supports_gradient_checkpointing = True
815
+
816
+ def _init_weights(self, module):
817
+ """Initialize the weights."""
818
+ if isinstance(module, nn.Linear):
819
+ # Slightly different from the TF version which uses truncated_normal for initialization
820
+ # cf https://github.com/pytorch/pytorch/pull/5617
821
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
822
+ if module.bias is not None:
823
+ module.bias.data.zero_()
824
+ elif isinstance(module, nn.Embedding):
825
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
826
+ if module.padding_idx is not None:
827
+ module.weight.data[module.padding_idx].zero_()
828
+
829
+
830
+ DEBERTA_START_DOCSTRING = r"""
831
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
832
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
+ two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
835
+
836
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
837
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
838
+ and behavior.
839
+
840
+
841
+ Parameters:
842
+ config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
843
+ Initializing with a config file does not load the weights associated with the model, only the
844
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
845
+ """
846
+
847
+ DEBERTA_INPUTS_DOCSTRING = r"""
848
+ Args:
849
+ input_ids (`torch.LongTensor` of shape `({0})`):
850
+ Indices of input sequence tokens in the vocabulary.
851
+
852
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
853
+ [`PreTrainedTokenizer.__call__`] for details.
854
+
855
+ [What are input IDs?](../glossary#input-ids)
856
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
857
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
858
+
859
+ - 1 for tokens that are **not masked**,
860
+ - 0 for tokens that are **masked**.
861
+
862
+ [What are attention masks?](../glossary#attention-mask)
863
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
864
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
865
+ 1]`:
866
+
867
+ - 0 corresponds to a *sentence A* token,
868
+ - 1 corresponds to a *sentence B* token.
869
+
870
+ [What are token type IDs?](../glossary#token-type-ids)
871
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
872
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
873
+ config.max_position_embeddings - 1]`.
874
+
875
+ [What are position IDs?](../glossary#position-ids)
876
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
877
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
878
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
879
+ model's internal embedding lookup matrix.
880
+ output_attentions (`bool`, *optional*):
881
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
882
+ tensors for more detail.
883
+ output_hidden_states (`bool`, *optional*):
884
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
885
+ more detail.
886
+ return_dict (`bool`, *optional*):
887
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
888
+ """
889
+
890
+
891
+ @add_start_docstrings(
892
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
893
+ DEBERTA_START_DOCSTRING,
894
+ )
895
+ class DebertaModel(DebertaPreTrainedModel):
896
+ def __init__(self, config):
897
+ super().__init__(config)
898
+
899
+ self.embeddings = DebertaEmbeddings(config)
900
+ self.encoder = DebertaEncoder(config)
901
+ self.z_steps = 0
902
+ self.config = config
903
+ # Initialize weights and apply final processing
904
+ self.post_init()
905
+
906
+ def get_input_embeddings(self):
907
+ return self.embeddings.word_embeddings
908
+
909
+ def set_input_embeddings(self, new_embeddings):
910
+ self.embeddings.word_embeddings = new_embeddings
911
+
912
+ def _prune_heads(self, heads_to_prune):
913
+ """
914
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
915
+ class PreTrainedModel
916
+ """
917
+ raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
918
+
919
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
920
+ @add_code_sample_docstrings(
921
+ checkpoint=_CHECKPOINT_FOR_DOC,
922
+ output_type=BaseModelOutput,
923
+ config_class=_CONFIG_FOR_DOC,
924
+ )
925
+ def forward(
926
+ self,
927
+ input_ids: Optional[torch.Tensor] = None,
928
+ attention_mask: Optional[torch.Tensor] = None,
929
+ token_type_ids: Optional[torch.Tensor] = None,
930
+ position_ids: Optional[torch.Tensor] = None,
931
+ inputs_embeds: Optional[torch.Tensor] = None,
932
+ output_attentions: Optional[bool] = None,
933
+ output_hidden_states: Optional[bool] = None,
934
+ return_dict: Optional[bool] = None,
935
+ ) -> Union[Tuple, BaseModelOutput]:
936
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
937
+ output_hidden_states = (
938
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
939
+ )
940
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
941
+
942
+ if input_ids is not None and inputs_embeds is not None:
943
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
944
+ elif input_ids is not None:
945
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
946
+ input_shape = input_ids.size()
947
+ elif inputs_embeds is not None:
948
+ input_shape = inputs_embeds.size()[:-1]
949
+ else:
950
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
951
+
952
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
953
+
954
+ if attention_mask is None:
955
+ attention_mask = torch.ones(input_shape, device=device)
956
+ if token_type_ids is None:
957
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
958
+
959
+ embedding_output = self.embeddings(
960
+ input_ids=input_ids,
961
+ token_type_ids=token_type_ids,
962
+ position_ids=position_ids,
963
+ mask=attention_mask,
964
+ inputs_embeds=inputs_embeds,
965
+ )
966
+
967
+ encoder_outputs = self.encoder(
968
+ embedding_output,
969
+ attention_mask,
970
+ output_hidden_states=True,
971
+ output_attentions=output_attentions,
972
+ return_dict=return_dict,
973
+ )
974
+ encoded_layers = encoder_outputs[1]
975
+
976
+ if self.z_steps > 1:
977
+ hidden_states = encoded_layers[-2]
978
+ layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
979
+ query_states = encoded_layers[-1]
980
+ rel_embeddings = self.encoder.get_rel_embedding()
981
+ attention_mask = self.encoder.get_attention_mask(attention_mask)
982
+ rel_pos = self.encoder.get_rel_pos(embedding_output)
983
+ for layer in layers[1:]:
984
+ query_states = layer(
985
+ hidden_states,
986
+ attention_mask,
987
+ output_attentions=False,
988
+ query_states=query_states,
989
+ relative_pos=rel_pos,
990
+ rel_embeddings=rel_embeddings,
991
+ )
992
+ encoded_layers.append(query_states)
993
+
994
+ sequence_output = encoded_layers[-1]
995
+
996
+ if not return_dict:
997
+ return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
998
+
999
+ return BaseModelOutput(
1000
+ last_hidden_state=sequence_output,
1001
+ hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
1002
+ attentions=encoder_outputs.attentions,
1003
+ )
1004
+
1005
+
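+ # A minimal usage sketch for DebertaModel (assuming the "microsoft/deberta-base" checkpoint is available):
+ # >>> from transformers import AutoTokenizer, DebertaModel
+ # >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
+ # >>> model = DebertaModel.from_pretrained("microsoft/deberta-base")
+ # >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ # >>> outputs = model(**inputs)
+ # >>> outputs.last_hidden_state.shape        # (batch_size, sequence_length, hidden_size)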
1006
+ @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
1007
+ class DebertaForMaskedLM(DebertaPreTrainedModel):
1008
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
1009
+
1010
+ def __init__(self, config):
1011
+ super().__init__(config)
1012
+
1013
+ self.deberta = DebertaModel(config)
1014
+ self.cls = DebertaOnlyMLMHead(config)
1015
+
1016
+ # Initialize weights and apply final processing
1017
+ self.post_init()
1018
+
1019
+ def get_output_embeddings(self):
1020
+ return self.cls.predictions.decoder
1021
+
1022
+ def set_output_embeddings(self, new_embeddings):
1023
+ self.cls.predictions.decoder = new_embeddings
1024
+
1025
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1026
+ @add_code_sample_docstrings(
1027
+ checkpoint=_CHECKPOINT_FOR_MASKED_LM,
1028
+ output_type=MaskedLMOutput,
1029
+ config_class=_CONFIG_FOR_DOC,
1030
+ mask="[MASK]",
1031
+ expected_output=_MASKED_LM_EXPECTED_OUTPUT,
1032
+ expected_loss=_MASKED_LM_EXPECTED_LOSS,
1033
+ )
1034
+ def forward(
1035
+ self,
1036
+ input_ids: Optional[torch.Tensor] = None,
1037
+ attention_mask: Optional[torch.Tensor] = None,
1038
+ token_type_ids: Optional[torch.Tensor] = None,
1039
+ position_ids: Optional[torch.Tensor] = None,
1040
+ inputs_embeds: Optional[torch.Tensor] = None,
1041
+ labels: Optional[torch.Tensor] = None,
1042
+ output_attentions: Optional[bool] = None,
1043
+ output_hidden_states: Optional[bool] = None,
1044
+ return_dict: Optional[bool] = None,
1045
+ ) -> Union[Tuple, MaskedLMOutput]:
1046
+ r"""
1047
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1048
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1051
+ """
1052
+
1053
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1054
+
1055
+ outputs = self.deberta(
1056
+ input_ids,
1057
+ attention_mask=attention_mask,
1058
+ token_type_ids=token_type_ids,
1059
+ position_ids=position_ids,
1060
+ inputs_embeds=inputs_embeds,
1061
+ output_attentions=output_attentions,
1062
+ output_hidden_states=output_hidden_states,
1063
+ return_dict=return_dict,
1064
+ )
1065
+
1066
+ sequence_output = outputs[0]
1067
+ prediction_scores = self.cls(sequence_output)
1068
+
1069
+ masked_lm_loss = None
1070
+ if labels is not None:
1071
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1072
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1073
+
1074
+ if not return_dict:
1075
+ output = (prediction_scores,) + outputs[1:]
1076
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1077
+
1078
+ return MaskedLMOutput(
1079
+ loss=masked_lm_loss,
1080
+ logits=prediction_scores,
1081
+ hidden_states=outputs.hidden_states,
1082
+ attentions=outputs.attentions,
1083
+ )
1084
+
1085
+
1086
+ class DebertaPredictionHeadTransform(nn.Module):
1087
+ def __init__(self, config):
1088
+ super().__init__()
1089
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
1090
+
1091
+ self.dense = nn.Linear(config.hidden_size, self.embedding_size)
1092
+ if isinstance(config.hidden_act, str):
1093
+ self.transform_act_fn = ACT2FN[config.hidden_act]
1094
+ else:
1095
+ self.transform_act_fn = config.hidden_act
1096
+ self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)
1097
+
1098
+ def forward(self, hidden_states):
1099
+ hidden_states = self.dense(hidden_states)
1100
+ hidden_states = self.transform_act_fn(hidden_states)
1101
+ hidden_states = self.LayerNorm(hidden_states)
1102
+ return hidden_states
1103
+
1104
+
1105
+ class DebertaLMPredictionHead(nn.Module):
1106
+ def __init__(self, config):
1107
+ super().__init__()
1108
+ self.transform = DebertaPredictionHeadTransform(config)
1109
+
1110
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
1111
+ # The output weights are the same as the input embeddings, but there is
1112
+ # an output-only bias for each token.
1113
+ self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
1114
+
1115
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1116
+
1117
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
1118
+ self.decoder.bias = self.bias
1119
+
1120
+ def forward(self, hidden_states):
1121
+ hidden_states = self.transform(hidden_states)
1122
+ hidden_states = self.decoder(hidden_states)
1123
+ return hidden_states
1124
+
1125
+
1126
+ # copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
1127
+ class DebertaOnlyMLMHead(nn.Module):
1128
+ def __init__(self, config):
1129
+ super().__init__()
1130
+ self.predictions = DebertaLMPredictionHead(config)
1131
+
1132
+ def forward(self, sequence_output):
1133
+ prediction_scores = self.predictions(sequence_output)
1134
+ return prediction_scores
1135
+
1136
+
1137
+ @add_start_docstrings(
1138
+ """
1139
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1140
+ pooled output) e.g. for GLUE tasks.
1141
+ """,
1142
+ DEBERTA_START_DOCSTRING,
1143
+ )
1144
+ class DebertaForSequenceClassification(DebertaPreTrainedModel):
1145
+ def __init__(self, config):
1146
+ super().__init__(config)
1147
+
1148
+ num_labels = getattr(config, "num_labels", 2)
1149
+ self.num_labels = num_labels
1150
+
1151
+ self.deberta = DebertaModel(config)
1152
+ self.pooler = ContextPooler(config)
1153
+ output_dim = self.pooler.output_dim
1154
+
1155
+ self.classifier = nn.Linear(output_dim, num_labels)
1156
+ drop_out = getattr(config, "cls_dropout", None)
1157
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
1158
+ self.dropout = StableDropout(drop_out)
1159
+
1160
+ # Initialize weights and apply final processing
1161
+ self.post_init()
1162
+
1163
+ def get_input_embeddings(self):
1164
+ return self.deberta.get_input_embeddings()
1165
+
1166
+ def set_input_embeddings(self, new_embeddings):
1167
+ self.deberta.set_input_embeddings(new_embeddings)
1168
+
1169
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1170
+ @add_code_sample_docstrings(
1171
+ checkpoint=_CHECKPOINT_FOR_DOC,
1172
+ output_type=SequenceClassifierOutput,
1173
+ config_class=_CONFIG_FOR_DOC,
1174
+ )
1175
+ def forward(
1176
+ self,
1177
+ input_ids: Optional[torch.Tensor] = None,
1178
+ attention_mask: Optional[torch.Tensor] = None,
1179
+ token_type_ids: Optional[torch.Tensor] = None,
1180
+ position_ids: Optional[torch.Tensor] = None,
1181
+ inputs_embeds: Optional[torch.Tensor] = None,
1182
+ labels: Optional[torch.Tensor] = None,
1183
+ output_attentions: Optional[bool] = None,
1184
+ output_hidden_states: Optional[bool] = None,
1185
+ return_dict: Optional[bool] = None,
1186
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1187
+ r"""
1188
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1189
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1190
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1191
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1192
+ """
1193
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1194
+
1195
+ outputs = self.deberta(
1196
+ input_ids,
1197
+ token_type_ids=token_type_ids,
1198
+ attention_mask=attention_mask,
1199
+ position_ids=position_ids,
1200
+ inputs_embeds=inputs_embeds,
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ )
1205
+
1206
+ encoder_layer = outputs[0]
1207
+ pooled_output = self.pooler(encoder_layer)
1208
+ pooled_output = self.dropout(pooled_output)
1209
+ logits = self.classifier(pooled_output)
1210
+
1211
+ loss = None
1212
+ if labels is not None:
1213
+ if self.config.problem_type is None:
1214
+ if self.num_labels == 1:
1215
+ # regression task
1216
+ loss_fn = nn.MSELoss()
1217
+ logits = logits.view(-1).to(labels.dtype)
1218
+ loss = loss_fn(logits, labels.view(-1))
1219
+ elif labels.dim() == 1 or labels.size(-1) == 1:
1220
+ label_index = (labels >= 0).nonzero()
1221
+ labels = labels.long()
1222
+ if label_index.size(0) > 0:
1223
+ labeled_logits = torch.gather(
1224
+ logits, 0, label_index.expand(label_index.size(0), logits.size(1))
1225
+ )
1226
+ labels = torch.gather(labels, 0, label_index.view(-1))
1227
+ loss_fct = CrossEntropyLoss()
1228
+ loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
1229
+ else:
1230
+ loss = torch.tensor(0).to(logits)
1231
+ else:
1232
+ log_softmax = nn.LogSoftmax(-1)
1233
+ loss = -((log_softmax(logits) * labels).sum(-1)).mean()
1234
+ elif self.config.problem_type == "regression":
1235
+ loss_fct = MSELoss()
1236
+ if self.num_labels == 1:
1237
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1238
+ else:
1239
+ loss = loss_fct(logits, labels)
1240
+ elif self.config.problem_type == "single_label_classification":
1241
+ loss_fct = CrossEntropyLoss()
1242
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1243
+ elif self.config.problem_type == "multi_label_classification":
1244
+ loss_fct = BCEWithLogitsLoss()
1245
+ loss = loss_fct(logits, labels)
1246
+ if not return_dict:
1247
+ output = (logits,) + outputs[1:]
1248
+ return ((loss,) + output) if loss is not None else output
1249
+
1250
+ return SequenceClassifierOutput(
1251
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1252
+ )
1253
+
1254
+
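+ # A minimal usage sketch for DebertaForSequenceClassification (assuming the "microsoft/deberta-base"
+ # checkpoint; the classification head on top is newly initialized):
+ # >>> from transformers import AutoTokenizer, DebertaForSequenceClassification
+ # >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
+ # >>> model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base", num_labels=2)
+ # >>> inputs = tokenizer("This movie was great!", return_tensors="pt")
+ # >>> model(**inputs).logits.shape           # (1, 2)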
1255
+ @add_start_docstrings(
1256
+ """
1257
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1258
+ Named-Entity-Recognition (NER) tasks.
1259
+ """,
1260
+ DEBERTA_START_DOCSTRING,
1261
+ )
1262
+ class DebertaForTokenClassification(DebertaPreTrainedModel):
1263
+ def __init__(self, config):
1264
+ super().__init__(config)
1265
+ self.num_labels = config.num_labels
1266
+
1267
+ self.deberta = DebertaModel(config)
1268
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1269
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1270
+
1271
+ # Initialize weights and apply final processing
1272
+ self.post_init()
1273
+
1274
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1275
+ @add_code_sample_docstrings(
1276
+ checkpoint=_CHECKPOINT_FOR_DOC,
1277
+ output_type=TokenClassifierOutput,
1278
+ config_class=_CONFIG_FOR_DOC,
1279
+ )
1280
+ def forward(
1281
+ self,
1282
+ input_ids: Optional[torch.Tensor] = None,
1283
+ attention_mask: Optional[torch.Tensor] = None,
1284
+ token_type_ids: Optional[torch.Tensor] = None,
1285
+ position_ids: Optional[torch.Tensor] = None,
1286
+ inputs_embeds: Optional[torch.Tensor] = None,
1287
+ labels: Optional[torch.Tensor] = None,
1288
+ output_attentions: Optional[bool] = None,
1289
+ output_hidden_states: Optional[bool] = None,
1290
+ return_dict: Optional[bool] = None,
1291
+ ) -> Union[Tuple, TokenClassifierOutput]:
1292
+ r"""
1293
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1294
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1295
+ """
1296
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1297
+
1298
+ outputs = self.deberta(
1299
+ input_ids,
1300
+ attention_mask=attention_mask,
1301
+ token_type_ids=token_type_ids,
1302
+ position_ids=position_ids,
1303
+ inputs_embeds=inputs_embeds,
1304
+ output_attentions=output_attentions,
1305
+ output_hidden_states=output_hidden_states,
1306
+ return_dict=return_dict,
1307
+ )
1308
+
1309
+ sequence_output = outputs[0]
1310
+
1311
+ sequence_output = self.dropout(sequence_output)
1312
+ logits = self.classifier(sequence_output)
1313
+
1314
+ loss = None
1315
+ if labels is not None:
1316
+ loss_fct = CrossEntropyLoss()
1317
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1318
+
1319
+ if not return_dict:
1320
+ output = (logits,) + outputs[1:]
1321
+ return ((loss,) + output) if loss is not None else output
1322
+
1323
+ return TokenClassifierOutput(
1324
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1325
+ )
1326
+
1327
+
1328
+ @add_start_docstrings(
1329
+ """
1330
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1332
+ """,
1333
+ DEBERTA_START_DOCSTRING,
1334
+ )
1335
+ class DebertaForQuestionAnswering(DebertaPreTrainedModel):
1336
+ def __init__(self, config):
1337
+ super().__init__(config)
1338
+ self.num_labels = config.num_labels
1339
+
1340
+ self.deberta = DebertaModel(config)
1341
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1342
+
1343
+ # Initialize weights and apply final processing
1344
+ self.post_init()
1345
+
1346
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1347
+ @add_code_sample_docstrings(
1348
+ checkpoint=_CHECKPOINT_FOR_QA,
1349
+ output_type=QuestionAnsweringModelOutput,
1350
+ config_class=_CONFIG_FOR_DOC,
1351
+ expected_output=_QA_EXPECTED_OUTPUT,
1352
+ expected_loss=_QA_EXPECTED_LOSS,
1353
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1354
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1355
+ )
1356
+ def forward(
1357
+ self,
1358
+ input_ids: Optional[torch.Tensor] = None,
1359
+ attention_mask: Optional[torch.Tensor] = None,
1360
+ token_type_ids: Optional[torch.Tensor] = None,
1361
+ position_ids: Optional[torch.Tensor] = None,
1362
+ inputs_embeds: Optional[torch.Tensor] = None,
1363
+ start_positions: Optional[torch.Tensor] = None,
1364
+ end_positions: Optional[torch.Tensor] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ return_dict: Optional[bool] = None,
1368
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1369
+ r"""
1370
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1371
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1372
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
1374
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ outputs = self.deberta(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ token_type_ids=token_type_ids,
1385
+ position_ids=position_ids,
1386
+ inputs_embeds=inputs_embeds,
1387
+ output_attentions=output_attentions,
1388
+ output_hidden_states=output_hidden_states,
1389
+ return_dict=return_dict,
1390
+ )
1391
+
1392
+ sequence_output = outputs[0]
1393
+
1394
+ logits = self.qa_outputs(sequence_output)
1395
+ start_logits, end_logits = logits.split(1, dim=-1)
1396
+ start_logits = start_logits.squeeze(-1).contiguous()
1397
+ end_logits = end_logits.squeeze(-1).contiguous()
1398
+
1399
+ total_loss = None
1400
+ if start_positions is not None and end_positions is not None:
1401
+ # If we are on multi-GPU, splitting adds a dimension
1402
+ if len(start_positions.size()) > 1:
1403
+ start_positions = start_positions.squeeze(-1)
1404
+ if len(end_positions.size()) > 1:
1405
+ end_positions = end_positions.squeeze(-1)
1406
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1407
+ ignored_index = start_logits.size(1)
1408
+ start_positions = start_positions.clamp(0, ignored_index)
1409
+ end_positions = end_positions.clamp(0, ignored_index)
1410
+
1411
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1412
+ start_loss = loss_fct(start_logits, start_positions)
1413
+ end_loss = loss_fct(end_logits, end_positions)
1414
+ total_loss = (start_loss + end_loss) / 2
1415
+
1416
+ if not return_dict:
1417
+ output = (start_logits, end_logits) + outputs[1:]
1418
+ return ((total_loss,) + output) if total_loss is not None else output
1419
+
1420
+ return QuestionAnsweringModelOutput(
1421
+ loss=total_loss,
1422
+ start_logits=start_logits,
1423
+ end_logits=end_logits,
1424
+ hidden_states=outputs.hidden_states,
1425
+ attentions=outputs.attentions,
1426
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py ADDED
@@ -0,0 +1,1644 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 DeBERTa model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ from typing import Dict, Optional, Sequence, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFMaskedLMOutput,
30
+ TFQuestionAnsweringModelOutput,
31
+ TFSequenceClassifierOutput,
32
+ TFTokenClassifierOutput,
33
+ )
34
+ from ...modeling_tf_utils import (
35
+ TFMaskedLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFQuestionAnsweringLoss,
39
+ TFSequenceClassificationLoss,
40
+ TFTokenClassificationLoss,
41
+ get_initializer,
42
+ keras,
43
+ unpack_inputs,
44
+ )
45
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
46
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
47
+ from .configuration_deberta import DebertaConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+
53
+ _CONFIG_FOR_DOC = "DebertaConfig"
54
+ _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base"
55
+
56
+
57
+ from ..deprecated._archive_maps import TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ class TFDebertaContextPooler(keras.layers.Layer):
61
+ def __init__(self, config: DebertaConfig, **kwargs):
62
+ super().__init__(**kwargs)
63
+ self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense")
64
+ self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout")
65
+ self.config = config
66
+
67
+ def call(self, hidden_states, training: bool = False):
68
+ # We "pool" the model by simply taking the hidden state corresponding
69
+ # to the first token.
70
+ context_token = hidden_states[:, 0]
71
+ context_token = self.dropout(context_token, training=training)
72
+ pooled_output = self.dense(context_token)
73
+ pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
74
+ return pooled_output
75
+
76
+ @property
77
+ def output_dim(self) -> int:
78
+ return self.config.hidden_size
79
+
80
+ def build(self, input_shape=None):
81
+ if self.built:
82
+ return
83
+ self.built = True
84
+ if getattr(self, "dense", None) is not None:
85
+ with tf.name_scope(self.dense.name):
86
+ self.dense.build([None, None, self.config.pooler_hidden_size])
87
+ if getattr(self, "dropout", None) is not None:
88
+ with tf.name_scope(self.dropout.name):
89
+ self.dropout.build(None)
90
+
91
+
92
+ class TFDebertaXSoftmax(keras.layers.Layer):
93
+ """
94
+ Masked Softmax which is optimized for saving memory
95
+
96
+ Args:
97
+ input (`tf.Tensor`): The input tensor that will apply softmax.
98
+ mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
99
+ dim (int): The dimension that will apply softmax
100
+ """
101
+
102
+ def __init__(self, axis=-1, **kwargs):
103
+ super().__init__(**kwargs)
104
+ self.axis = axis
105
+
106
+ def call(self, inputs: tf.Tensor, mask: tf.Tensor):
107
+ rmask = tf.logical_not(tf.cast(mask, tf.bool))
108
+ output = tf.where(rmask, float("-inf"), inputs)
109
+ output = stable_softmax(output, self.axis)
110
+ output = tf.where(rmask, 0.0, output)
111
+ return output
112
+
113
+
114
+ class TFDebertaStableDropout(keras.layers.Layer):
115
+ """
116
+ Optimized dropout module for stabilizing the training
117
+
118
+ Args:
119
+ drop_prob (float): the dropout probabilities
120
+ """
121
+
122
+ def __init__(self, drop_prob, **kwargs):
123
+ super().__init__(**kwargs)
124
+ self.drop_prob = drop_prob
125
+
126
+ @tf.custom_gradient
127
+ def xdropout(self, inputs):
128
+ """
129
+ Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.
130
+ """
131
+ mask = tf.cast(
132
+ 1
133
+ - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
134
+ tf.bool,
135
+ )
136
+ scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
137
+ if self.drop_prob > 0:
138
+ inputs = tf.where(mask, 0.0, inputs) * scale
139
+
140
+ def grad(upstream):
141
+ if self.drop_prob > 0:
142
+ return tf.where(mask, 0.0, upstream) * scale
143
+ else:
144
+ return upstream
145
+
146
+ return inputs, grad
147
+
148
+ def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
149
+ if training:
150
+ return self.xdropout(inputs)
151
+ return inputs
152
+
153
+
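A minimal sketch (illustrative only, not from the committed file) of the inverted-dropout behaviour `TFDebertaStableDropout.xdropout` implements; it uses `tf.random.uniform` in place of the `tf.compat.v1` Bernoulli sampler used above:

```python
import tensorflow as tf

drop_prob = 0.1
x = tf.ones((2, 4))

keep = tf.cast(tf.random.uniform(tf.shape(x)) >= drop_prob, x.dtype)  # 1 = kept, 0 = dropped
y = x * keep / (1.0 - drop_prob)                                      # kept elements scaled by 1/(1-p)
print(y.numpy())  # entries are either 0.0 or ~1.111, so the expected value stays 1.0
```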
154
+ class TFDebertaLayerNorm(keras.layers.Layer):
155
+ """LayerNorm module in the TF style (epsilon inside the square root)."""
156
+
157
+ def __init__(self, size, eps=1e-12, **kwargs):
158
+ super().__init__(**kwargs)
159
+ self.size = size
160
+ self.eps = eps
161
+
162
+ def build(self, input_shape):
163
+ self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight")
164
+ self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias")
165
+ return super().build(input_shape)
166
+
167
+ def call(self, x: tf.Tensor) -> tf.Tensor:
168
+ mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
169
+ variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
170
+ std = tf.math.sqrt(variance + self.eps)
171
+ return self.gamma * (x - mean) / std + self.beta
172
+
173
+
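A minimal sketch (illustrative only, not from the committed file) of the "epsilon inside the square root" normalization that `TFDebertaLayerNorm` applies, written out on a small tensor:

```python
import tensorflow as tf

eps = 1e-12
x = tf.constant([[1.0, 2.0, 3.0, 4.0]])
gamma, beta = tf.ones([4]), tf.zeros([4])

mean = tf.reduce_mean(x, axis=-1, keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=-1, keepdims=True)
std = tf.sqrt(variance + eps)                      # eps is added before taking the square root
print((gamma * (x - mean) / std + beta).numpy())   # zero-mean, unit-variance row
```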
174
+ class TFDebertaSelfOutput(keras.layers.Layer):
175
+ def __init__(self, config: DebertaConfig, **kwargs):
176
+ super().__init__(**kwargs)
177
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
178
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
179
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
180
+ self.config = config
181
+
182
+ def call(self, hidden_states, input_tensor, training: bool = False):
183
+ hidden_states = self.dense(hidden_states)
184
+ hidden_states = self.dropout(hidden_states, training=training)
185
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
186
+ return hidden_states
187
+
188
+ def build(self, input_shape=None):
189
+ if self.built:
190
+ return
191
+ self.built = True
192
+ if getattr(self, "dense", None) is not None:
193
+ with tf.name_scope(self.dense.name):
194
+ self.dense.build([None, None, self.config.hidden_size])
195
+ if getattr(self, "LayerNorm", None) is not None:
196
+ with tf.name_scope(self.LayerNorm.name):
197
+ self.LayerNorm.build([None, None, self.config.hidden_size])
198
+ if getattr(self, "dropout", None) is not None:
199
+ with tf.name_scope(self.dropout.name):
200
+ self.dropout.build(None)
201
+
202
+
203
+ class TFDebertaAttention(keras.layers.Layer):
204
+ def __init__(self, config: DebertaConfig, **kwargs):
205
+ super().__init__(**kwargs)
206
+ self.self = TFDebertaDisentangledSelfAttention(config, name="self")
207
+ self.dense_output = TFDebertaSelfOutput(config, name="output")
208
+ self.config = config
209
+
210
+ def call(
211
+ self,
212
+ input_tensor: tf.Tensor,
213
+ attention_mask: tf.Tensor,
214
+ query_states: tf.Tensor = None,
215
+ relative_pos: tf.Tensor = None,
216
+ rel_embeddings: tf.Tensor = None,
217
+ output_attentions: bool = False,
218
+ training: bool = False,
219
+ ) -> Tuple[tf.Tensor]:
220
+ self_outputs = self.self(
221
+ hidden_states=input_tensor,
222
+ attention_mask=attention_mask,
223
+ query_states=query_states,
224
+ relative_pos=relative_pos,
225
+ rel_embeddings=rel_embeddings,
226
+ output_attentions=output_attentions,
227
+ training=training,
228
+ )
229
+ if query_states is None:
230
+ query_states = input_tensor
231
+ attention_output = self.dense_output(
232
+ hidden_states=self_outputs[0], input_tensor=query_states, training=training
233
+ )
234
+
235
+ output = (attention_output,) + self_outputs[1:]
236
+
237
+ return output
238
+
239
+ def build(self, input_shape=None):
240
+ if self.built:
241
+ return
242
+ self.built = True
243
+ if getattr(self, "self", None) is not None:
244
+ with tf.name_scope(self.self.name):
245
+ self.self.build(None)
246
+ if getattr(self, "dense_output", None) is not None:
247
+ with tf.name_scope(self.dense_output.name):
248
+ self.dense_output.build(None)
249
+
250
+
251
+ class TFDebertaIntermediate(keras.layers.Layer):
252
+ def __init__(self, config: DebertaConfig, **kwargs):
253
+ super().__init__(**kwargs)
254
+
255
+ self.dense = keras.layers.Dense(
256
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
257
+ )
258
+
259
+ if isinstance(config.hidden_act, str):
260
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
261
+ else:
262
+ self.intermediate_act_fn = config.hidden_act
263
+ self.config = config
264
+
265
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
266
+ hidden_states = self.dense(inputs=hidden_states)
267
+ hidden_states = self.intermediate_act_fn(hidden_states)
268
+
269
+ return hidden_states
270
+
271
+ def build(self, input_shape=None):
272
+ if self.built:
273
+ return
274
+ self.built = True
275
+ if getattr(self, "dense", None) is not None:
276
+ with tf.name_scope(self.dense.name):
277
+ self.dense.build([None, None, self.config.hidden_size])
278
+
279
+
280
+ class TFDebertaOutput(keras.layers.Layer):
281
+ def __init__(self, config: DebertaConfig, **kwargs):
282
+ super().__init__(**kwargs)
283
+
284
+ self.dense = keras.layers.Dense(
285
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
286
+ )
287
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
288
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
289
+ self.config = config
290
+
291
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
292
+ hidden_states = self.dense(inputs=hidden_states)
293
+ hidden_states = self.dropout(hidden_states, training=training)
294
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
295
+
296
+ return hidden_states
297
+
298
+ def build(self, input_shape=None):
299
+ if self.built:
300
+ return
301
+ self.built = True
302
+ if getattr(self, "dense", None) is not None:
303
+ with tf.name_scope(self.dense.name):
304
+ self.dense.build([None, None, self.config.intermediate_size])
305
+ if getattr(self, "LayerNorm", None) is not None:
306
+ with tf.name_scope(self.LayerNorm.name):
307
+ self.LayerNorm.build([None, None, self.config.hidden_size])
308
+ if getattr(self, "dropout", None) is not None:
309
+ with tf.name_scope(self.dropout.name):
310
+ self.dropout.build(None)
311
+
312
+
313
+ class TFDebertaLayer(keras.layers.Layer):
314
+ def __init__(self, config: DebertaConfig, **kwargs):
315
+ super().__init__(**kwargs)
316
+
317
+ self.attention = TFDebertaAttention(config, name="attention")
318
+ self.intermediate = TFDebertaIntermediate(config, name="intermediate")
319
+ self.bert_output = TFDebertaOutput(config, name="output")
320
+
321
+ def call(
322
+ self,
323
+ hidden_states: tf.Tensor,
324
+ attention_mask: tf.Tensor,
325
+ query_states: tf.Tensor = None,
326
+ relative_pos: tf.Tensor = None,
327
+ rel_embeddings: tf.Tensor = None,
328
+ output_attentions: bool = False,
329
+ training: bool = False,
330
+ ) -> Tuple[tf.Tensor]:
331
+ attention_outputs = self.attention(
332
+ input_tensor=hidden_states,
333
+ attention_mask=attention_mask,
334
+ query_states=query_states,
335
+ relative_pos=relative_pos,
336
+ rel_embeddings=rel_embeddings,
337
+ output_attentions=output_attentions,
338
+ training=training,
339
+ )
340
+ attention_output = attention_outputs[0]
341
+ intermediate_output = self.intermediate(hidden_states=attention_output)
342
+ layer_output = self.bert_output(
343
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
344
+ )
345
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
346
+
347
+ return outputs
348
+
349
+ def build(self, input_shape=None):
350
+ if self.built:
351
+ return
352
+ self.built = True
353
+ if getattr(self, "attention", None) is not None:
354
+ with tf.name_scope(self.attention.name):
355
+ self.attention.build(None)
356
+ if getattr(self, "intermediate", None) is not None:
357
+ with tf.name_scope(self.intermediate.name):
358
+ self.intermediate.build(None)
359
+ if getattr(self, "bert_output", None) is not None:
360
+ with tf.name_scope(self.bert_output.name):
361
+ self.bert_output.build(None)
362
+
363
+
364
+ class TFDebertaEncoder(keras.layers.Layer):
365
+ def __init__(self, config: DebertaConfig, **kwargs):
366
+ super().__init__(**kwargs)
367
+
368
+ self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
369
+ self.relative_attention = getattr(config, "relative_attention", False)
370
+ self.config = config
371
+ if self.relative_attention:
372
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
373
+ if self.max_relative_positions < 1:
374
+ self.max_relative_positions = config.max_position_embeddings
375
+
376
+ def build(self, input_shape=None):
377
+ if self.built:
378
+ return
379
+ self.built = True
380
+ if self.relative_attention:
381
+ self.rel_embeddings = self.add_weight(
382
+ name="rel_embeddings.weight",
383
+ shape=[self.max_relative_positions * 2, self.config.hidden_size],
384
+ initializer=get_initializer(self.config.initializer_range),
385
+ )
386
+ if getattr(self, "layer", None) is not None:
387
+ for layer in self.layer:
388
+ with tf.name_scope(layer.name):
389
+ layer.build(None)
390
+
391
+ def get_rel_embedding(self):
392
+ rel_embeddings = self.rel_embeddings if self.relative_attention else None
393
+ return rel_embeddings
394
+
395
+ def get_attention_mask(self, attention_mask):
396
+ if len(shape_list(attention_mask)) <= 2:
397
+ extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
398
+ attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
399
+ attention_mask = tf.cast(attention_mask, tf.uint8)
400
+ elif len(shape_list(attention_mask)) == 3:
401
+ attention_mask = tf.expand_dims(attention_mask, 1)
402
+
403
+ return attention_mask
404
+
405
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
406
+ if self.relative_attention and relative_pos is None:
407
+ q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
408
+ relative_pos = build_relative_position(q, shape_list(hidden_states)[-2])
409
+ return relative_pos
410
+
411
+ def call(
412
+ self,
413
+ hidden_states: tf.Tensor,
414
+ attention_mask: tf.Tensor,
415
+ query_states: tf.Tensor = None,
416
+ relative_pos: tf.Tensor = None,
417
+ output_attentions: bool = False,
418
+ output_hidden_states: bool = False,
419
+ return_dict: bool = True,
420
+ training: bool = False,
421
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
422
+ all_hidden_states = () if output_hidden_states else None
423
+ all_attentions = () if output_attentions else None
424
+
425
+ attention_mask = self.get_attention_mask(attention_mask)
426
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
427
+
428
+ if isinstance(hidden_states, Sequence):
429
+ next_kv = hidden_states[0]
430
+ else:
431
+ next_kv = hidden_states
432
+
433
+ rel_embeddings = self.get_rel_embedding()
434
+
435
+ for i, layer_module in enumerate(self.layer):
436
+ if output_hidden_states:
437
+ all_hidden_states = all_hidden_states + (hidden_states,)
438
+
439
+ layer_outputs = layer_module(
440
+ hidden_states=next_kv,
441
+ attention_mask=attention_mask,
442
+ query_states=query_states,
443
+ relative_pos=relative_pos,
444
+ rel_embeddings=rel_embeddings,
445
+ output_attentions=output_attentions,
446
+ training=training,
447
+ )
448
+ hidden_states = layer_outputs[0]
449
+
450
+ if query_states is not None:
451
+ query_states = hidden_states
452
+ if isinstance(hidden_states, Sequence):
453
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
454
+ else:
455
+ next_kv = hidden_states
456
+
457
+ if output_attentions:
458
+ all_attentions = all_attentions + (layer_outputs[1],)
459
+
460
+ # Add last layer
461
+ if output_hidden_states:
462
+ all_hidden_states = all_hidden_states + (hidden_states,)
463
+
464
+ if not return_dict:
465
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
466
+
467
+ return TFBaseModelOutput(
468
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
469
+ )
470
+
471
+
472
+ def build_relative_position(query_size, key_size):
473
+ """
474
+ Build relative position according to the query and key
475
+
476
+ We assume the absolute position of query \\(P_q\\) ranges from (0, query_size) and the absolute position of key
477
+ \\(P_k\\) ranges from (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
478
+ P_k\\)
479
+
480
+ Args:
481
+ query_size (int): the length of the query
482
+ key_size (int): the length of the key
483
+
484
+ Return:
485
+ `tf.Tensor`: A tensor with shape [1, query_size, key_size]
486
+
487
+ """
488
+ q_ids = tf.range(query_size, dtype=tf.int32)
489
+ k_ids = tf.range(key_size, dtype=tf.int32)
490
+ rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])
491
+ rel_pos_ids = rel_pos_ids[:query_size, :]
492
+ rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
493
+ return tf.cast(rel_pos_ids, tf.int64)
494
+
495
+
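A minimal sketch (illustrative only, not from the committed file) of the relative-position matrix `build_relative_position` produces, i.e. R[q, k] = q - k, here for 3 query and 4 key positions (the function itself also adds a leading batch dimension and casts to int64):

```python
import tensorflow as tf

query_size, key_size = 3, 4
q_ids = tf.range(query_size)[:, None]   # shape (3, 1)
k_ids = tf.range(key_size)[None, :]     # shape (1, 4)
rel_pos = q_ids - k_ids                 # broadcasts to (3, 4)
print(rel_pos.numpy())
# [[ 0 -1 -2 -3]
#  [ 1  0 -1 -2]
#  [ 2  1  0 -1]]
```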
496
+ def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
497
+ shapes = [
498
+ shape_list(query_layer)[0],
499
+ shape_list(query_layer)[1],
500
+ shape_list(query_layer)[2],
501
+ shape_list(relative_pos)[-1],
502
+ ]
503
+ return tf.broadcast_to(c2p_pos, shapes)
504
+
505
+
506
+ def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
507
+ shapes = [
508
+ shape_list(query_layer)[0],
509
+ shape_list(query_layer)[1],
510
+ shape_list(key_layer)[-2],
511
+ shape_list(key_layer)[-2],
512
+ ]
513
+ return tf.broadcast_to(c2p_pos, shapes)
514
+
515
+
516
+ def pos_dynamic_expand(pos_index, p2c_att, key_layer):
517
+ shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
518
+ return tf.broadcast_to(pos_index, shapes)
519
+
520
+
521
+ def torch_gather(x, indices, gather_axis):
522
+ if gather_axis < 0:
523
+ gather_axis = tf.rank(x) + gather_axis
524
+
525
+ if gather_axis != tf.rank(x) - 1:
526
+ pre_roll = tf.rank(x) - 1 - gather_axis
527
+ permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)
528
+ x = tf.transpose(x, perm=permutation)
529
+ indices = tf.transpose(indices, perm=permutation)
530
+ else:
531
+ pre_roll = 0
532
+
533
+ flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))
534
+ flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))
535
+ gathered = tf.gather(flat_x, flat_indices, batch_dims=1)
536
+ gathered = tf.reshape(gathered, tf.shape(indices))
537
+
538
+ if pre_roll != 0:
539
+ permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)
540
+ gathered = tf.transpose(gathered, perm=permutation)
541
+
542
+ return gathered
543
+
544
+
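A minimal sketch (illustrative only, not from the committed file) of the `torch.gather`-style lookup along the last axis that `torch_gather` emulates, expressed with `tf.gather(batch_dims=1)`:

```python
import tensorflow as tf

x = tf.constant([[10.0, 11.0, 12.0],
                 [20.0, 21.0, 22.0]])
indices = tf.constant([[2, 0],
                       [1, 1]])

# For each row i, pick x[i, indices[i, j]]; the output shape follows `indices`.
gathered = tf.gather(x, indices, batch_dims=1)
print(gathered.numpy())  # [[12. 10.]
                         #  [21. 21.]]
```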
545
+ class TFDebertaDisentangledSelfAttention(keras.layers.Layer):
546
+ """
547
+ Disentangled self-attention module
548
+
549
+ Parameters:
550
+ config (`DebertaConfig`):
551
+ A model config class instance with the configuration to build a new model. The schema is similar to
552
+ *BertConfig*; for more details, please refer to [`DebertaConfig`]
553
+
554
+ """
555
+
556
+ def __init__(self, config: DebertaConfig, **kwargs):
557
+ super().__init__(**kwargs)
558
+ if config.hidden_size % config.num_attention_heads != 0:
559
+ raise ValueError(
560
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
561
+ f"heads ({config.num_attention_heads})"
562
+ )
563
+ self.num_attention_heads = config.num_attention_heads
564
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
565
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
566
+ self.in_proj = keras.layers.Dense(
567
+ self.all_head_size * 3,
568
+ kernel_initializer=get_initializer(config.initializer_range),
569
+ name="in_proj",
570
+ use_bias=False,
571
+ )
572
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
573
+
574
+ self.relative_attention = getattr(config, "relative_attention", False)
575
+ self.talking_head = getattr(config, "talking_head", False)
576
+
577
+ if self.talking_head:
578
+ self.head_logits_proj = keras.layers.Dense(
579
+ self.num_attention_heads,
580
+ kernel_initializer=get_initializer(config.initializer_range),
581
+ name="head_logits_proj",
582
+ use_bias=False,
583
+ )
584
+ self.head_weights_proj = keras.layers.Dense(
585
+ self.num_attention_heads,
586
+ kernel_initializer=get_initializer(config.initializer_range),
587
+ name="head_weights_proj",
588
+ use_bias=False,
589
+ )
590
+
591
+ self.softmax = TFDebertaXSoftmax(axis=-1)
592
+
593
+ if self.relative_attention:
594
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
595
+ if self.max_relative_positions < 1:
596
+ self.max_relative_positions = config.max_position_embeddings
597
+ self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout")
598
+ if "c2p" in self.pos_att_type:
599
+ self.pos_proj = keras.layers.Dense(
600
+ self.all_head_size,
601
+ kernel_initializer=get_initializer(config.initializer_range),
602
+ name="pos_proj",
603
+ use_bias=False,
604
+ )
605
+ if "p2c" in self.pos_att_type:
606
+ self.pos_q_proj = keras.layers.Dense(
607
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj"
608
+ )
609
+
610
+ self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout")
611
+ self.config = config
612
+
613
+ def build(self, input_shape=None):
614
+ if self.built:
615
+ return
616
+ self.built = True
617
+ self.q_bias = self.add_weight(
618
+ name="q_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
619
+ )
620
+ self.v_bias = self.add_weight(
621
+ name="v_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
622
+ )
623
+ if getattr(self, "in_proj", None) is not None:
624
+ with tf.name_scope(self.in_proj.name):
625
+ self.in_proj.build([None, None, self.config.hidden_size])
626
+ if getattr(self, "dropout", None) is not None:
627
+ with tf.name_scope(self.dropout.name):
628
+ self.dropout.build(None)
629
+ if getattr(self, "head_logits_proj", None) is not None:
630
+ with tf.name_scope(self.head_logits_proj.name):
631
+ self.head_logits_proj.build(None)
632
+ if getattr(self, "head_weights_proj", None) is not None:
633
+ with tf.name_scope(self.head_weights_proj.name):
634
+ self.head_weights_proj.build(None)
635
+ if getattr(self, "pos_dropout", None) is not None:
636
+ with tf.name_scope(self.pos_dropout.name):
637
+ self.pos_dropout.build(None)
638
+ if getattr(self, "pos_proj", None) is not None:
639
+ with tf.name_scope(self.pos_proj.name):
640
+ self.pos_proj.build([self.config.hidden_size])
641
+ if getattr(self, "pos_q_proj", None) is not None:
642
+ with tf.name_scope(self.pos_q_proj.name):
643
+ self.pos_q_proj.build([self.config.hidden_size])
644
+
645
+ def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor:
646
+ shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1]
647
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
648
+ tensor = tf.reshape(tensor=tensor, shape=shape)
649
+
650
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
651
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
652
+
653
+ def call(
654
+ self,
655
+ hidden_states: tf.Tensor,
656
+ attention_mask: tf.Tensor,
657
+ query_states: tf.Tensor = None,
658
+ relative_pos: tf.Tensor = None,
659
+ rel_embeddings: tf.Tensor = None,
660
+ output_attentions: bool = False,
661
+ training: bool = False,
662
+ ) -> Tuple[tf.Tensor]:
663
+ """
664
+ Call the module
665
+
666
+ Args:
667
+ hidden_states (`tf.Tensor`):
668
+ Input states to the module, usually the output from the previous layer; they will be the Q, K and V in
669
+ *Attention(Q,K,V)*
670
+
671
+ attention_mask (`tf.Tensor`):
672
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
673
+ sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
674
+ th token.
675
+
676
+ output_attentions (`bool`, *optional*):
677
+ Whether to return the attention matrix.
678
+
679
+ query_states (`tf.Tensor`, optional):
680
+ The *Q* state in *Attention(Q,K,V)*.
681
+
682
+ relative_pos (`tf.Tensor`):
683
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
684
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
685
+
686
+ rel_embeddings (`tf.Tensor`):
687
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
688
+ \\text{max_relative_positions}\\), *hidden_size*].
689
+
690
+
691
+ """
692
+ if query_states is None:
693
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
694
+ query_layer, key_layer, value_layer = tf.split(
695
+ self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1
696
+ )
697
+ else:
698
+
699
+ def linear(w, b, x):
700
+ out = tf.matmul(x, w, transpose_b=True)
701
+ if b is not None:
702
+ out += tf.transpose(b)
703
+ return out
704
+
705
+ ws = tf.split(
706
+ tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0
707
+ )
708
+ qkvw = tf.TensorArray(dtype=tf.float32, size=3)
709
+ for k in tf.range(3):
710
+ qkvw_inside = tf.TensorArray(dtype=tf.float32, size=self.num_attention_heads)
711
+ for i in tf.range(self.num_attention_heads):
712
+ qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k])
713
+ qkvw = qkvw.write(k, qkvw_inside.concat())
714
+ qkvb = [None] * 3
715
+
716
+ q = linear(qkvw[0], qkvb[0], query_states)
717
+ k = linear(qkvw[1], qkvb[1], hidden_states)
718
+ v = linear(qkvw[2], qkvb[2], hidden_states)
719
+ query_layer = self.transpose_for_scores(q)
720
+ key_layer = self.transpose_for_scores(k)
721
+ value_layer = self.transpose_for_scores(v)
722
+
723
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
724
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
725
+
726
+ rel_att = None
727
+ # Take the dot product between "query" and "key" to get the raw attention scores.
728
+ scale_factor = 1 + len(self.pos_att_type)
729
+ scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor)
730
+ query_layer = query_layer / scale
731
+
732
+ attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
733
+ if self.relative_attention:
734
+ rel_embeddings = self.pos_dropout(rel_embeddings, training=training)
735
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
736
+
737
+ if rel_att is not None:
738
+ attention_scores = attention_scores + rel_att
739
+
740
+ if self.talking_head:
741
+ attention_scores = tf.transpose(
742
+ self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2]
743
+ )
744
+
745
+ attention_probs = self.softmax(attention_scores, attention_mask)
746
+ attention_probs = self.dropout(attention_probs, training=training)
747
+ if self.talking_head:
748
+ attention_probs = tf.transpose(
749
+ self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2]
750
+ )
751
+
752
+ context_layer = tf.matmul(attention_probs, value_layer)
753
+ context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
754
+ context_layer_shape = shape_list(context_layer)
755
+ # Set the final dimension here explicitly.
756
+ # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
757
+ # the model in graph mode as context_layer is reshaped to (None, 7, None) and the Dense layer in TFDebertaSelfOutput
758
+ # requires final input dimension to be defined
759
+ new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
760
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
761
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
762
+ return outputs
763
+
764
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
765
+ if relative_pos is None:
766
+ q = shape_list(query_layer)[-2]
767
+ relative_pos = build_relative_position(q, shape_list(key_layer)[-2])
768
+ shape_list_pos = shape_list(relative_pos)
769
+ if len(shape_list_pos) == 2:
770
+ relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
771
+ elif len(shape_list_pos) == 3:
772
+ relative_pos = tf.expand_dims(relative_pos, 1)
773
+ # bxhxqxk
774
+ elif len(shape_list_pos) != 4:
775
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
776
+
777
+ att_span = tf.cast(
778
+ tf.minimum(
779
+ tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions
780
+ ),
781
+ tf.int64,
782
+ )
783
+ rel_embeddings = tf.expand_dims(
784
+ rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0
785
+ )
786
+
787
+ score = 0
788
+
789
+ # content->position
790
+ if "c2p" in self.pos_att_type:
791
+ pos_key_layer = self.pos_proj(rel_embeddings)
792
+ pos_key_layer = self.transpose_for_scores(pos_key_layer)
793
+ c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2]))
794
+ c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
795
+ c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1)
796
+ score += c2p_att
797
+
798
+ # position->content
799
+ if "p2c" in self.pos_att_type:
800
+ pos_query_layer = self.pos_q_proj(rel_embeddings)
801
+ pos_query_layer = self.transpose_for_scores(pos_query_layer)
802
+ pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=tf.float32))
803
+ if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
804
+ r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2])
805
+ else:
806
+ r_pos = relative_pos
807
+ p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
808
+ p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2]))
809
+ p2c_att = tf.transpose(
810
+ torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2]
811
+ )
812
+ if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
813
+ pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1)
814
+ p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2)
815
+ score += p2c_att
816
+
817
+ return score
818
+
819
+
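A minimal sketch (illustrative only, not from the committed file) of how `disentangled_att_bias` shifts and clips relative positions into row indices of the 2*att_span relative-embedding table for the content-to-position ("c2p") term; the values are hypothetical:

```python
import tensorflow as tf

att_span = 4                                                   # bounded by max_relative_positions
relative_pos = tf.constant([[-6, -2, 0, 2, 6]], dtype=tf.int64)
c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
print(c2p_pos.numpy())  # [[0 2 4 6 7]] -> valid row indices into the relative-embedding table
```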
820
+ class TFDebertaEmbeddings(keras.layers.Layer):
821
+ """Construct the embeddings from word, position and token_type embeddings."""
822
+
823
+ def __init__(self, config, **kwargs):
824
+ super().__init__(**kwargs)
825
+
826
+ self.config = config
827
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
828
+ self.hidden_size = config.hidden_size
829
+ self.max_position_embeddings = config.max_position_embeddings
830
+ self.position_biased_input = getattr(config, "position_biased_input", True)
831
+ self.initializer_range = config.initializer_range
832
+ if self.embedding_size != config.hidden_size:
833
+ self.embed_proj = keras.layers.Dense(
834
+ config.hidden_size,
835
+ kernel_initializer=get_initializer(config.initializer_range),
836
+ name="embed_proj",
837
+ use_bias=False,
838
+ )
839
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
840
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
841
+
842
+ def build(self, input_shape=None):
843
+ with tf.name_scope("word_embeddings"):
844
+ self.weight = self.add_weight(
845
+ name="weight",
846
+ shape=[self.config.vocab_size, self.embedding_size],
847
+ initializer=get_initializer(self.initializer_range),
848
+ )
849
+
850
+ with tf.name_scope("token_type_embeddings"):
851
+ if self.config.type_vocab_size > 0:
852
+ self.token_type_embeddings = self.add_weight(
853
+ name="embeddings",
854
+ shape=[self.config.type_vocab_size, self.embedding_size],
855
+ initializer=get_initializer(self.initializer_range),
856
+ )
857
+ else:
858
+ self.token_type_embeddings = None
859
+
860
+ with tf.name_scope("position_embeddings"):
861
+ if self.position_biased_input:
862
+ self.position_embeddings = self.add_weight(
863
+ name="embeddings",
864
+ shape=[self.max_position_embeddings, self.hidden_size],
865
+ initializer=get_initializer(self.initializer_range),
866
+ )
867
+ else:
868
+ self.position_embeddings = None
869
+
870
+ if self.built:
871
+ return
872
+ self.built = True
873
+ if getattr(self, "LayerNorm", None) is not None:
874
+ with tf.name_scope(self.LayerNorm.name):
875
+ self.LayerNorm.build([None, None, self.config.hidden_size])
876
+ if getattr(self, "dropout", None) is not None:
877
+ with tf.name_scope(self.dropout.name):
878
+ self.dropout.build(None)
879
+ if getattr(self, "embed_proj", None) is not None:
880
+ with tf.name_scope(self.embed_proj.name):
881
+ self.embed_proj.build([None, None, self.embedding_size])
882
+
883
+ def call(
884
+ self,
885
+ input_ids: tf.Tensor = None,
886
+ position_ids: tf.Tensor = None,
887
+ token_type_ids: tf.Tensor = None,
888
+ inputs_embeds: tf.Tensor = None,
889
+ mask: tf.Tensor = None,
890
+ training: bool = False,
891
+ ) -> tf.Tensor:
892
+ """
893
+ Applies the embedding lookup based on the input tensors.
894
+
895
+ Returns:
896
+ final_embeddings (`tf.Tensor`): output embedding tensor.
897
+ """
898
+ if input_ids is None and inputs_embeds is None:
899
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
900
+
901
+ if input_ids is not None:
902
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
903
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
904
+
905
+ input_shape = shape_list(inputs_embeds)[:-1]
906
+
907
+ if token_type_ids is None:
908
+ token_type_ids = tf.fill(dims=input_shape, value=0)
909
+
910
+ if position_ids is None:
911
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
912
+
913
+ final_embeddings = inputs_embeds
914
+ if self.position_biased_input:
915
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
916
+ final_embeddings += position_embeds
917
+ if self.config.type_vocab_size > 0:
918
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
919
+ final_embeddings += token_type_embeds
920
+
921
+ if self.embedding_size != self.hidden_size:
922
+ final_embeddings = self.embed_proj(final_embeddings)
923
+
924
+ final_embeddings = self.LayerNorm(final_embeddings)
925
+
926
+ if mask is not None:
927
+ if len(shape_list(mask)) != len(shape_list(final_embeddings)):
928
+ if len(shape_list(mask)) == 4:
929
+ mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
930
+ mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
931
+
932
+ final_embeddings = final_embeddings * mask
933
+
934
+ final_embeddings = self.dropout(final_embeddings, training=training)
935
+
936
+ return final_embeddings
937
+
938
+
939
+ class TFDebertaPredictionHeadTransform(keras.layers.Layer):
940
+ def __init__(self, config: DebertaConfig, **kwargs):
941
+ super().__init__(**kwargs)
942
+
943
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
944
+
945
+ self.dense = keras.layers.Dense(
946
+ units=self.embedding_size,
947
+ kernel_initializer=get_initializer(config.initializer_range),
948
+ name="dense",
949
+ )
950
+
951
+ if isinstance(config.hidden_act, str):
952
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
953
+ else:
954
+ self.transform_act_fn = config.hidden_act
955
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
956
+ self.config = config
957
+
958
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
959
+ hidden_states = self.dense(inputs=hidden_states)
960
+ hidden_states = self.transform_act_fn(hidden_states)
961
+ hidden_states = self.LayerNorm(hidden_states)
962
+
963
+ return hidden_states
964
+
965
+ def build(self, input_shape=None):
966
+ if self.built:
967
+ return
968
+ self.built = True
969
+ if getattr(self, "dense", None) is not None:
970
+ with tf.name_scope(self.dense.name):
971
+ self.dense.build([None, None, self.config.hidden_size])
972
+ if getattr(self, "LayerNorm", None) is not None:
973
+ with tf.name_scope(self.LayerNorm.name):
974
+ self.LayerNorm.build([None, None, self.embedding_size])
975
+
976
+
977
+ class TFDebertaLMPredictionHead(keras.layers.Layer):
978
+ def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
979
+ super().__init__(**kwargs)
980
+
981
+ self.config = config
982
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
983
+
984
+ self.transform = TFDebertaPredictionHeadTransform(config, name="transform")
985
+
986
+ # The output weights are the same as the input embeddings, but there is
987
+ # an output-only bias for each token.
988
+ self.input_embeddings = input_embeddings
989
+
990
+ def build(self, input_shape=None):
991
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
992
+
993
+ if self.built:
994
+ return
995
+ self.built = True
996
+ if getattr(self, "transform", None) is not None:
997
+ with tf.name_scope(self.transform.name):
998
+ self.transform.build(None)
999
+
1000
+ def get_output_embeddings(self) -> keras.layers.Layer:
1001
+ return self.input_embeddings
1002
+
1003
+ def set_output_embeddings(self, value: tf.Variable):
1004
+ self.input_embeddings.weight = value
1005
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1006
+
1007
+ def get_bias(self) -> Dict[str, tf.Variable]:
1008
+ return {"bias": self.bias}
1009
+
1010
+ def set_bias(self, value: tf.Variable):
1011
+ self.bias = value["bias"]
1012
+ self.config.vocab_size = shape_list(value["bias"])[0]
1013
+
1014
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1015
+ hidden_states = self.transform(hidden_states=hidden_states)
1016
+ seq_length = shape_list(hidden_states)[1]
1017
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
1018
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1019
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1020
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1021
+
1022
+ return hidden_states
1023
+
1024
+
1025
+ class TFDebertaOnlyMLMHead(keras.layers.Layer):
1026
+ def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
1027
+ super().__init__(**kwargs)
1028
+ self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions")
1029
+
1030
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1031
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1032
+
1033
+ return prediction_scores
1034
+
1035
+ def build(self, input_shape=None):
1036
+ if self.built:
1037
+ return
1038
+ self.built = True
1039
+ if getattr(self, "predictions", None) is not None:
1040
+ with tf.name_scope(self.predictions.name):
1041
+ self.predictions.build(None)
1042
+
1043
+
1044
+ # @keras_serializable
1045
+ class TFDebertaMainLayer(keras.layers.Layer):
1046
+ config_class = DebertaConfig
1047
+
1048
+ def __init__(self, config: DebertaConfig, **kwargs):
1049
+ super().__init__(**kwargs)
1050
+
1051
+ self.config = config
1052
+
1053
+ self.embeddings = TFDebertaEmbeddings(config, name="embeddings")
1054
+ self.encoder = TFDebertaEncoder(config, name="encoder")
1055
+
1056
+ def get_input_embeddings(self) -> keras.layers.Layer:
1057
+ return self.embeddings
1058
+
1059
+ def set_input_embeddings(self, value: tf.Variable):
1060
+ self.embeddings.weight = value
1061
+ self.embeddings.vocab_size = shape_list(value)[0]
1062
+
1063
+ def _prune_heads(self, heads_to_prune):
1064
+ """
1065
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
1066
+ class PreTrainedModel
1067
+ """
1068
+ raise NotImplementedError
1069
+
1070
+ @unpack_inputs
1071
+ def call(
1072
+ self,
1073
+ input_ids: TFModelInputType | None = None,
1074
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1075
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1076
+ position_ids: np.ndarray | tf.Tensor | None = None,
1077
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1078
+ output_attentions: Optional[bool] = None,
1079
+ output_hidden_states: Optional[bool] = None,
1080
+ return_dict: Optional[bool] = None,
1081
+ training: bool = False,
1082
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1083
+ if input_ids is not None and inputs_embeds is not None:
1084
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1085
+ elif input_ids is not None:
1086
+ input_shape = shape_list(input_ids)
1087
+ elif inputs_embeds is not None:
1088
+ input_shape = shape_list(inputs_embeds)[:-1]
1089
+ else:
1090
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1091
+
1092
+ if attention_mask is None:
1093
+ attention_mask = tf.fill(dims=input_shape, value=1)
1094
+
1095
+ if token_type_ids is None:
1096
+ token_type_ids = tf.fill(dims=input_shape, value=0)
1097
+
1098
+ embedding_output = self.embeddings(
1099
+ input_ids=input_ids,
1100
+ position_ids=position_ids,
1101
+ token_type_ids=token_type_ids,
1102
+ inputs_embeds=inputs_embeds,
1103
+ mask=attention_mask,
1104
+ training=training,
1105
+ )
1106
+
1107
+ encoder_outputs = self.encoder(
1108
+ hidden_states=embedding_output,
1109
+ attention_mask=attention_mask,
1110
+ output_attentions=output_attentions,
1111
+ output_hidden_states=output_hidden_states,
1112
+ return_dict=return_dict,
1113
+ training=training,
1114
+ )
1115
+
1116
+ sequence_output = encoder_outputs[0]
1117
+
1118
+ if not return_dict:
1119
+ return (sequence_output,) + encoder_outputs[1:]
1120
+
1121
+ return TFBaseModelOutput(
1122
+ last_hidden_state=sequence_output,
1123
+ hidden_states=encoder_outputs.hidden_states,
1124
+ attentions=encoder_outputs.attentions,
1125
+ )
1126
+
1127
+ def build(self, input_shape=None):
1128
+ if self.built:
1129
+ return
1130
+ self.built = True
1131
+ if getattr(self, "embeddings", None) is not None:
1132
+ with tf.name_scope(self.embeddings.name):
1133
+ self.embeddings.build(None)
1134
+ if getattr(self, "encoder", None) is not None:
1135
+ with tf.name_scope(self.encoder.name):
1136
+ self.encoder.build(None)
1137
+
1138
+
1139
+ class TFDebertaPreTrainedModel(TFPreTrainedModel):
1140
+ """
1141
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1142
+ models.
1143
+ """
1144
+
1145
+ config_class = DebertaConfig
1146
+ base_model_prefix = "deberta"
1147
+
1148
+
1149
+ DEBERTA_START_DOCSTRING = r"""
1150
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
1151
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
1152
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two
1153
+ improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
1154
+
1155
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1156
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1157
+ behavior.
1158
+
1159
+ <Tip>
1160
+
1161
+ TensorFlow models and layers in `transformers` accept two formats as input:
1162
+
1163
+ - having all inputs as keyword arguments (like PyTorch models), or
1164
+ - having all inputs as a list, tuple or dict in the first positional argument.
1165
+
1166
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1167
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1168
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1169
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1170
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1171
+ positional argument:
1172
+
1173
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1174
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1175
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1176
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1177
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1178
+
1179
+ Note that when creating models and layers with
1180
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1181
+ about any of this, as you can just pass inputs like you would to any other Python function!
1182
+
1183
+ </Tip>
1184
+
1185
+ Parameters:
1186
+ config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
1187
+ Initializing with a config file does not load the weights associated with the model, only the
1188
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1189
+ """
1190
+
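A minimal sketch (illustrative only, not from the committed file) of the three input formats described in `DEBERTA_START_DOCSTRING`, using the `kamalkraj/deberta-base` checkpoint referenced by `_CHECKPOINT_FOR_DOC` (downloading the weights is assumed to be possible):

```python
from transformers import AutoTokenizer, TFDebertaModel

tokenizer = AutoTokenizer.from_pretrained("kamalkraj/deberta-base")
model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base")
enc = tokenizer("Hello world", return_tensors="tf")

out1 = model(enc["input_ids"])                                  # a single tensor
out2 = model([enc["input_ids"], enc["attention_mask"]])         # a list, in docstring order
out3 = model({"input_ids": enc["input_ids"],                    # a dict keyed by input name
              "attention_mask": enc["attention_mask"]})
print(out1.last_hidden_state.shape)
```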
1191
+ DEBERTA_INPUTS_DOCSTRING = r"""
1192
+ Args:
1193
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
1194
+ Indices of input sequence tokens in the vocabulary.
1195
+
1196
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1197
+ [`PreTrainedTokenizer.__call__`] for details.
1198
+
1199
+ [What are input IDs?](../glossary#input-ids)
1200
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1201
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1202
+
1203
+ - 1 for tokens that are **not masked**,
1204
+ - 0 for tokens that are **masked**.
1205
+
1206
+ [What are attention masks?](../glossary#attention-mask)
1207
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1208
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1209
+ 1]`:
1210
+
1211
+ - 0 corresponds to a *sentence A* token,
1212
+ - 1 corresponds to a *sentence B* token.
1213
+
1214
+ [What are token type IDs?](../glossary#token-type-ids)
1215
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1216
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1217
+ config.max_position_embeddings - 1]`.
1218
+
1219
+ [What are position IDs?](../glossary#position-ids)
1220
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1221
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1222
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
1223
+ model's internal embedding lookup matrix.
1224
+ output_attentions (`bool`, *optional*):
1225
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1226
+ tensors for more detail.
1227
+ output_hidden_states (`bool`, *optional*):
1228
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1229
+ more detail.
1230
+ return_dict (`bool`, *optional*):
1231
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1232
+ """
1233
+
1234
+
1235
+ @add_start_docstrings(
1236
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
1237
+ DEBERTA_START_DOCSTRING,
1238
+ )
1239
+ class TFDebertaModel(TFDebertaPreTrainedModel):
1240
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1241
+ super().__init__(config, *inputs, **kwargs)
1242
+
1243
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1244
+
1245
+ @unpack_inputs
1246
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1247
+ @add_code_sample_docstrings(
1248
+ checkpoint=_CHECKPOINT_FOR_DOC,
1249
+ output_type=TFBaseModelOutput,
1250
+ config_class=_CONFIG_FOR_DOC,
1251
+ )
1252
+ def call(
1253
+ self,
1254
+ input_ids: TFModelInputType | None = None,
1255
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1256
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1257
+ position_ids: np.ndarray | tf.Tensor | None = None,
1258
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1259
+ output_attentions: Optional[bool] = None,
1260
+ output_hidden_states: Optional[bool] = None,
1261
+ return_dict: Optional[bool] = None,
1262
+ training: Optional[bool] = False,
1263
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1264
+ outputs = self.deberta(
1265
+ input_ids=input_ids,
1266
+ attention_mask=attention_mask,
1267
+ token_type_ids=token_type_ids,
1268
+ position_ids=position_ids,
1269
+ inputs_embeds=inputs_embeds,
1270
+ output_attentions=output_attentions,
1271
+ output_hidden_states=output_hidden_states,
1272
+ return_dict=return_dict,
1273
+ training=training,
1274
+ )
1275
+
1276
+ return outputs
1277
+
1278
+ def build(self, input_shape=None):
1279
+ if self.built:
1280
+ return
1281
+ self.built = True
1282
+ if getattr(self, "deberta", None) is not None:
1283
+ with tf.name_scope(self.deberta.name):
1284
+ self.deberta.build(None)
1285
+
1286
+
1287
+ @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
1288
+ class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss):
1289
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1290
+ super().__init__(config, *inputs, **kwargs)
1291
+
1292
+ if config.is_decoder:
1293
+ logger.warning(
1294
+ "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for "
1295
+ "bi-directional self-attention."
1296
+ )
1297
+
1298
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1299
+ self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
1300
+
1301
+ def get_lm_head(self) -> keras.layers.Layer:
1302
+ return self.mlm.predictions
1303
+
1304
+ @unpack_inputs
1305
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1306
+ @add_code_sample_docstrings(
1307
+ checkpoint=_CHECKPOINT_FOR_DOC,
1308
+ output_type=TFMaskedLMOutput,
1309
+ config_class=_CONFIG_FOR_DOC,
1310
+ )
1311
+ def call(
1312
+ self,
1313
+ input_ids: TFModelInputType | None = None,
1314
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1315
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1316
+ position_ids: np.ndarray | tf.Tensor | None = None,
1317
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1318
+ output_attentions: Optional[bool] = None,
1319
+ output_hidden_states: Optional[bool] = None,
1320
+ return_dict: Optional[bool] = None,
1321
+ labels: np.ndarray | tf.Tensor | None = None,
1322
+ training: Optional[bool] = False,
1323
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1324
+ r"""
1325
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1326
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1327
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1328
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1329
+ """
1330
+ outputs = self.deberta(
1331
+ input_ids=input_ids,
1332
+ attention_mask=attention_mask,
1333
+ token_type_ids=token_type_ids,
1334
+ position_ids=position_ids,
1335
+ inputs_embeds=inputs_embeds,
1336
+ output_attentions=output_attentions,
1337
+ output_hidden_states=output_hidden_states,
1338
+ return_dict=return_dict,
1339
+ training=training,
1340
+ )
1341
+ sequence_output = outputs[0]
1342
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1343
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1344
+
1345
+ if not return_dict:
1346
+ output = (prediction_scores,) + outputs[2:]
1347
+ return ((loss,) + output) if loss is not None else output
1348
+
1349
+ return TFMaskedLMOutput(
1350
+ loss=loss,
1351
+ logits=prediction_scores,
1352
+ hidden_states=outputs.hidden_states,
1353
+ attentions=outputs.attentions,
1354
+ )
1355
+
1356
+ def build(self, input_shape=None):
1357
+ if self.built:
1358
+ return
1359
+ self.built = True
1360
+ if getattr(self, "deberta", None) is not None:
1361
+ with tf.name_scope(self.deberta.name):
1362
+ self.deberta.build(None)
1363
+ if getattr(self, "mlm", None) is not None:
1364
+ with tf.name_scope(self.mlm.name):
1365
+ self.mlm.build(None)
1366
+
1367
+
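A minimal sketch (illustrative only, not from the committed file) of using `TFDebertaForMaskedLM` to fill a masked token; it assumes the `kamalkraj/deberta-base` checkpoint carries MLM head weights:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFDebertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("kamalkraj/deberta-base")
model = TFDebertaForMaskedLM.from_pretrained("kamalkraj/deberta-base")

inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
logits = model(**inputs).logits

mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))
```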
1368
+ @add_start_docstrings(
1369
+ """
1370
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1371
+ pooled output) e.g. for GLUE tasks.
1372
+ """,
1373
+ DEBERTA_START_DOCSTRING,
1374
+ )
1375
+ class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss):
1376
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1377
+ super().__init__(config, *inputs, **kwargs)
1378
+
1379
+ self.num_labels = config.num_labels
1380
+
1381
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1382
+ self.pooler = TFDebertaContextPooler(config, name="pooler")
1383
+
1384
+ drop_out = getattr(config, "cls_dropout", None)
1385
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
1386
+ self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout")
1387
+ self.classifier = keras.layers.Dense(
1388
+ units=config.num_labels,
1389
+ kernel_initializer=get_initializer(config.initializer_range),
1390
+ name="classifier",
1391
+ )
1392
+ self.output_dim = self.pooler.output_dim
1393
+
1394
+ @unpack_inputs
1395
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1396
+ @add_code_sample_docstrings(
1397
+ checkpoint=_CHECKPOINT_FOR_DOC,
1398
+ output_type=TFSequenceClassifierOutput,
1399
+ config_class=_CONFIG_FOR_DOC,
1400
+ )
1401
+ def call(
1402
+ self,
1403
+ input_ids: TFModelInputType | None = None,
1404
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1405
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1406
+ position_ids: np.ndarray | tf.Tensor | None = None,
1407
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1408
+ output_attentions: Optional[bool] = None,
1409
+ output_hidden_states: Optional[bool] = None,
1410
+ return_dict: Optional[bool] = None,
1411
+ labels: np.ndarray | tf.Tensor | None = None,
1412
+ training: Optional[bool] = False,
1413
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1414
+ r"""
1415
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1416
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1417
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1418
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1419
+ """
1420
+ outputs = self.deberta(
1421
+ input_ids=input_ids,
1422
+ attention_mask=attention_mask,
1423
+ token_type_ids=token_type_ids,
1424
+ position_ids=position_ids,
1425
+ inputs_embeds=inputs_embeds,
1426
+ output_attentions=output_attentions,
1427
+ output_hidden_states=output_hidden_states,
1428
+ return_dict=return_dict,
1429
+ training=training,
1430
+ )
1431
+ sequence_output = outputs[0]
1432
+ pooled_output = self.pooler(sequence_output, training=training)
1433
+ pooled_output = self.dropout(pooled_output, training=training)
1434
+ logits = self.classifier(pooled_output)
1435
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1436
+
1437
+ if not return_dict:
1438
+ output = (logits,) + outputs[1:]
1439
+
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return TFSequenceClassifierOutput(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ hidden_states=outputs.hidden_states,
1446
+ attentions=outputs.attentions,
1447
+ )
1448
+
1449
+ def build(self, input_shape=None):
1450
+ if self.built:
1451
+ return
1452
+ self.built = True
1453
+ if getattr(self, "deberta", None) is not None:
1454
+ with tf.name_scope(self.deberta.name):
1455
+ self.deberta.build(None)
1456
+ if getattr(self, "pooler", None) is not None:
1457
+ with tf.name_scope(self.pooler.name):
1458
+ self.pooler.build(None)
1459
+ if getattr(self, "dropout", None) is not None:
1460
+ with tf.name_scope(self.dropout.name):
1461
+ self.dropout.build(None)
1462
+ if getattr(self, "classifier", None) is not None:
1463
+ with tf.name_scope(self.classifier.name):
1464
+ self.classifier.build([None, None, self.output_dim])
1465
+
1466
+
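A minimal sketch (illustrative only, not from the committed file) of `TFDebertaForSequenceClassification`; when `labels` are passed, the loss is computed through `hf_compute_loss` exactly as in `call()` above. The `num_labels=2` head is randomly initialized here:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFDebertaForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("kamalkraj/deberta-base")
model = TFDebertaForSequenceClassification.from_pretrained("kamalkraj/deberta-base", num_labels=2)

inputs = tokenizer("A short example sentence.", return_tensors="tf")
outputs = model(**inputs, labels=tf.constant([1]))
print(outputs.loss.numpy(), outputs.logits.shape)  # loss and logits of shape (1, 2)
```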
1467
+ @add_start_docstrings(
1468
+ """
1469
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1470
+ Named-Entity-Recognition (NER) tasks.
1471
+ """,
1472
+ DEBERTA_START_DOCSTRING,
1473
+ )
1474
+ class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss):
1475
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1476
+ super().__init__(config, *inputs, **kwargs)
1477
+
1478
+ self.num_labels = config.num_labels
1479
+
1480
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1481
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1482
+ self.classifier = keras.layers.Dense(
1483
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1484
+ )
1485
+ self.config = config
1486
+
1487
+ @unpack_inputs
1488
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1489
+ @add_code_sample_docstrings(
1490
+ checkpoint=_CHECKPOINT_FOR_DOC,
1491
+ output_type=TFTokenClassifierOutput,
1492
+ config_class=_CONFIG_FOR_DOC,
1493
+ )
1494
+ def call(
1495
+ self,
1496
+ input_ids: TFModelInputType | None = None,
1497
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1498
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1499
+ position_ids: np.ndarray | tf.Tensor | None = None,
1500
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1501
+ output_attentions: Optional[bool] = None,
1502
+ output_hidden_states: Optional[bool] = None,
1503
+ return_dict: Optional[bool] = None,
1504
+ labels: np.ndarray | tf.Tensor | None = None,
1505
+ training: Optional[bool] = False,
1506
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1507
+ r"""
1508
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1509
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1510
+ """
1511
+ outputs = self.deberta(
1512
+ input_ids=input_ids,
1513
+ attention_mask=attention_mask,
1514
+ token_type_ids=token_type_ids,
1515
+ position_ids=position_ids,
1516
+ inputs_embeds=inputs_embeds,
1517
+ output_attentions=output_attentions,
1518
+ output_hidden_states=output_hidden_states,
1519
+ return_dict=return_dict,
1520
+ training=training,
1521
+ )
1522
+ sequence_output = outputs[0]
1523
+ sequence_output = self.dropout(sequence_output, training=training)
1524
+ logits = self.classifier(inputs=sequence_output)
1525
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1526
+
1527
+ if not return_dict:
1528
+ output = (logits,) + outputs[1:]
1529
+ return ((loss,) + output) if loss is not None else output
1530
+
1531
+ return TFTokenClassifierOutput(
1532
+ loss=loss,
1533
+ logits=logits,
1534
+ hidden_states=outputs.hidden_states,
1535
+ attentions=outputs.attentions,
1536
+ )
1537
+
1538
+ def build(self, input_shape=None):
1539
+ if self.built:
1540
+ return
1541
+ self.built = True
1542
+ if getattr(self, "deberta", None) is not None:
1543
+ with tf.name_scope(self.deberta.name):
1544
+ self.deberta.build(None)
1545
+ if getattr(self, "classifier", None) is not None:
1546
+ with tf.name_scope(self.classifier.name):
1547
+ self.classifier.build([None, None, self.config.hidden_size])
1548
+
1549
+
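Likewise (commentary, not part of the diff), the token-classification head above emits one logit vector per token rather than a pooled prediction; a small sketch under the same assumptions (checkpoint and label count are placeholders):

```python
# Sketch only: per-token logits from TFDebertaForTokenClassification.
import tensorflow as tf
from transformers import DebertaTokenizer, TFDebertaForTokenClassification

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaForTokenClassification.from_pretrained("microsoft/deberta-base", num_labels=5)

inputs = tokenizer("Hugging Face is based in New York City", return_tensors="tf")
logits = model(**inputs).logits    # shape: (batch, sequence_length, num_labels)
print(tf.argmax(logits, axis=-1))  # one predicted label id per token
```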
1550
+ @add_start_docstrings(
1551
+ """
1552
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1553
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1554
+ """,
1555
+ DEBERTA_START_DOCSTRING,
1556
+ )
1557
+ class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss):
1558
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1559
+ super().__init__(config, *inputs, **kwargs)
1560
+
1561
+ self.num_labels = config.num_labels
1562
+
1563
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1564
+ self.qa_outputs = keras.layers.Dense(
1565
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1566
+ )
1567
+ self.config = config
1568
+
1569
+ @unpack_inputs
1570
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1571
+ @add_code_sample_docstrings(
1572
+ checkpoint=_CHECKPOINT_FOR_DOC,
1573
+ output_type=TFQuestionAnsweringModelOutput,
1574
+ config_class=_CONFIG_FOR_DOC,
1575
+ )
1576
+ def call(
1577
+ self,
1578
+ input_ids: TFModelInputType | None = None,
1579
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1580
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1581
+ position_ids: np.ndarray | tf.Tensor | None = None,
1582
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ return_dict: Optional[bool] = None,
1586
+ start_positions: np.ndarray | tf.Tensor | None = None,
1587
+ end_positions: np.ndarray | tf.Tensor | None = None,
1588
+ training: Optional[bool] = False,
1589
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1590
+ r"""
1591
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1592
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1593
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1594
+ are not taken into account for computing the loss.
1595
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1596
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1597
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1598
+ are not taken into account for computing the loss.
1599
+ """
1600
+ outputs = self.deberta(
1601
+ input_ids=input_ids,
1602
+ attention_mask=attention_mask,
1603
+ token_type_ids=token_type_ids,
1604
+ position_ids=position_ids,
1605
+ inputs_embeds=inputs_embeds,
1606
+ output_attentions=output_attentions,
1607
+ output_hidden_states=output_hidden_states,
1608
+ return_dict=return_dict,
1609
+ training=training,
1610
+ )
1611
+ sequence_output = outputs[0]
1612
+ logits = self.qa_outputs(inputs=sequence_output)
1613
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1614
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1615
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1616
+ loss = None
1617
+
1618
+ if start_positions is not None and end_positions is not None:
1619
+ labels = {"start_position": start_positions}
1620
+ labels["end_position"] = end_positions
1621
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1622
+
1623
+ if not return_dict:
1624
+ output = (start_logits, end_logits) + outputs[2:]
1625
+ return ((loss,) + output) if loss is not None else output
1626
+
1627
+ return TFQuestionAnsweringModelOutput(
1628
+ loss=loss,
1629
+ start_logits=start_logits,
1630
+ end_logits=end_logits,
1631
+ hidden_states=outputs.hidden_states,
1632
+ attentions=outputs.attentions,
1633
+ )
1634
+
1635
+ def build(self, input_shape=None):
1636
+ if self.built:
1637
+ return
1638
+ self.built = True
1639
+ if getattr(self, "deberta", None) is not None:
1640
+ with tf.name_scope(self.deberta.name):
1641
+ self.deberta.build(None)
1642
+ if getattr(self, "qa_outputs", None) is not None:
1643
+ with tf.name_scope(self.qa_outputs.name):
1644
+ self.qa_outputs.build([None, None, self.config.hidden_size])
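To round off the three task heads (commentary, not part of the diff): a sketch of turning the start/end logits of the question-answering head above into an answer span. The checkpoint is an assumption and its `qa_outputs` layer is untrained here, so the decoded span is arbitrary; a checkpoint fine-tuned on SQuAD would be used in practice.

```python
# Sketch only: start/end logits -> answer span with TFDebertaForQuestionAnswering.
import tensorflow as tf
from transformers import DebertaTokenizer, TFDebertaForQuestionAnswering

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaForQuestionAnswering.from_pretrained("microsoft/deberta-base")

question, context = "Who proposed DeBERTa?", "DeBERTa was proposed by researchers at Microsoft."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```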
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py ADDED
@@ -0,0 +1,393 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model DeBERTa."""
16
+
17
+ import json
18
+ import os
19
+ from typing import List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
30
+
31
+
32
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
33
+ def bytes_to_unicode():
34
+ """
35
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
36
+ characters the bpe code barfs on.
37
+
38
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
39
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
40
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
41
+ tables between utf-8 bytes and unicode strings.
42
+ """
43
+ bs = (
44
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
45
+ )
46
+ cs = bs[:]
47
+ n = 0
48
+ for b in range(2**8):
49
+ if b not in bs:
50
+ bs.append(b)
51
+ cs.append(2**8 + n)
52
+ n += 1
53
+ cs = [chr(n) for n in cs]
54
+ return dict(zip(bs, cs))
55
+
56
+
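To make the byte-to-unicode table above concrete (commentary, not part of the diff): every raw byte is mapped to a printable character so the BPE step never has to handle whitespace or control bytes directly. The space byte is the usual example; it surfaces as the 'Ġ' marker familiar from GPT-2-style vocabularies.

```python
# Illustration only, using the bytes_to_unicode() helper defined above.
byte_encoder = bytes_to_unicode()

print(byte_encoder[ord(" ")])  # 'Ġ' (U+0120): the printable stand-in for the space byte
print("".join(byte_encoder[b] for b in " Hello".encode("utf-8")))  # 'ĠHello'
```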
57
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
58
+ def get_pairs(word):
59
+ """
60
+ Return set of symbol pairs in a word.
61
+
62
+ Word is represented as tuple of symbols (symbols being variable-length strings).
63
+ """
64
+ pairs = set()
65
+ prev_char = word[0]
66
+ for char in word[1:]:
67
+ pairs.add((prev_char, char))
68
+ prev_char = char
69
+ return pairs
70
+
71
+
72
+ class DebertaTokenizer(PreTrainedTokenizer):
73
+ """
74
+ Construct a DeBERTa tokenizer. Based on byte-level Byte-Pair-Encoding.
75
+
76
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
77
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
78
+
79
+ ```python
80
+ >>> from transformers import DebertaTokenizer
81
+
82
+ >>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
83
+ >>> tokenizer("Hello world")["input_ids"]
84
+ [1, 31414, 232, 2]
85
+
86
+ >>> tokenizer(" Hello world")["input_ids"]
87
+ [1, 20920, 232, 2]
88
+ ```
89
+
90
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
91
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
92
+
93
+ <Tip>
94
+
95
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
96
+
97
+ </Tip>
98
+
99
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
100
+ this superclass for more information regarding those methods.
101
+
102
+ Args:
103
+ vocab_file (`str`):
104
+ Path to the vocabulary file.
105
+ merges_file (`str`):
106
+ Path to the merges file.
107
+ errors (`str`, *optional*, defaults to `"replace"`):
108
+ Paradigm to follow when decoding bytes to UTF-8. See
109
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
110
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
111
+ The beginning of sequence token.
112
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
113
+ The end of sequence token.
114
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
115
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
116
+ sequence classification or for a text and a question for question answering. It is also used as the last
117
+ token of a sequence built with special tokens.
118
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
119
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
120
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
121
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
122
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
123
+ token instead.
124
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
125
+ The token used for padding, for example when batching sequences of different lengths.
126
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
127
+ The token used for masking values. This is the token used when training this model with masked language
128
+ modeling. This is the token which the model will try to predict.
129
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
130
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
131
+ other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space).
132
+ add_bos_token (`bool`, *optional*, defaults to `False`):
133
+ Whether or not to add an initial beginning-of-sequence token (`bos_token`) to the input. This allows to treat the leading word just as
134
+ any other word.
135
+ """
136
+
137
+ vocab_files_names = VOCAB_FILES_NAMES
138
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
139
+
140
+ def __init__(
141
+ self,
142
+ vocab_file,
143
+ merges_file,
144
+ errors="replace",
145
+ bos_token="[CLS]",
146
+ eos_token="[SEP]",
147
+ sep_token="[SEP]",
148
+ cls_token="[CLS]",
149
+ unk_token="[UNK]",
150
+ pad_token="[PAD]",
151
+ mask_token="[MASK]",
152
+ add_prefix_space=False,
153
+ add_bos_token=False,
154
+ **kwargs,
155
+ ):
156
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
157
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
158
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
159
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
160
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
161
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
162
+
163
+ # Mask token behaves like a normal word, i.e. includes the space before it
164
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
165
+ self.add_bos_token = add_bos_token
166
+
167
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
168
+ self.encoder = json.load(vocab_handle)
169
+ self.decoder = {v: k for k, v in self.encoder.items()}
170
+ self.errors = errors # how to handle errors in decoding
171
+ self.byte_encoder = bytes_to_unicode()
172
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
173
+ with open(merges_file, encoding="utf-8") as merges_handle:
174
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
175
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
176
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
177
+ self.cache = {}
178
+ self.add_prefix_space = add_prefix_space
179
+
180
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
181
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
182
+
183
+ super().__init__(
184
+ errors=errors,
185
+ bos_token=bos_token,
186
+ eos_token=eos_token,
187
+ unk_token=unk_token,
188
+ sep_token=sep_token,
189
+ cls_token=cls_token,
190
+ pad_token=pad_token,
191
+ mask_token=mask_token,
192
+ add_prefix_space=add_prefix_space,
193
+ add_bos_token=add_bos_token,
194
+ **kwargs,
195
+ )
196
+
197
+ @property
198
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.vocab_size
199
+ def vocab_size(self):
200
+ return len(self.encoder)
201
+
202
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
203
+ def get_vocab(self):
204
+ return dict(self.encoder, **self.added_tokens_encoder)
205
+
206
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
207
+ def bpe(self, token):
208
+ if token in self.cache:
209
+ return self.cache[token]
210
+ word = tuple(token)
211
+ pairs = get_pairs(word)
212
+
213
+ if not pairs:
214
+ return token
215
+
216
+ while True:
217
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
218
+ if bigram not in self.bpe_ranks:
219
+ break
220
+ first, second = bigram
221
+ new_word = []
222
+ i = 0
223
+ while i < len(word):
224
+ try:
225
+ j = word.index(first, i)
226
+ except ValueError:
227
+ new_word.extend(word[i:])
228
+ break
229
+ else:
230
+ new_word.extend(word[i:j])
231
+ i = j
232
+
233
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
234
+ new_word.append(first + second)
235
+ i += 2
236
+ else:
237
+ new_word.append(word[i])
238
+ i += 1
239
+ new_word = tuple(new_word)
240
+ word = new_word
241
+ if len(word) == 1:
242
+ break
243
+ else:
244
+ pairs = get_pairs(word)
245
+ word = " ".join(word)
246
+ self.cache[token] = word
247
+ return word
248
+
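As a worked illustration of the merge loop above (commentary, not part of the diff): a stripped-down version of the same algorithm run on a toy merge table. The ranks are invented for the example; in the real tokenizer they come from `merges.txt` via `self.bpe_ranks`.

```python
# Toy BPE merge: lower rank = merged first, exactly as in bpe() above.
bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}

word = ["l", "o", "w", "e", "r"]
while len(word) > 1:
    pairs = set(zip(word, word[1:]))
    bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
    if bigram not in bpe_ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)  # collapse the highest-priority pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = merged

print(" ".join(word))  # "low er"
```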
249
+ def build_inputs_with_special_tokens(
250
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
251
+ ) -> List[int]:
252
+ """
253
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
254
+ adding special tokens. A DeBERTa sequence has the following format:
255
+
256
+ - single sequence: [CLS] X [SEP]
257
+ - pair of sequences: [CLS] A [SEP] B [SEP]
258
+
259
+ Args:
260
+ token_ids_0 (`List[int]`):
261
+ List of IDs to which the special tokens will be added.
262
+ token_ids_1 (`List[int]`, *optional*):
263
+ Optional second list of IDs for sequence pairs.
264
+
265
+ Returns:
266
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
267
+ """
268
+ if token_ids_1 is None:
269
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
270
+ cls = [self.cls_token_id]
271
+ sep = [self.sep_token_id]
272
+ return cls + token_ids_0 + sep + token_ids_1 + sep
273
+
274
+ def get_special_tokens_mask(
275
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
276
+ ) -> List[int]:
277
+ """
278
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
279
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
280
+
281
+ Args:
282
+ token_ids_0 (`List[int]`):
283
+ List of IDs.
284
+ token_ids_1 (`List[int]`, *optional*):
285
+ Optional second list of IDs for sequence pairs.
286
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
287
+ Whether or not the token list is already formatted with special tokens for the model.
288
+
289
+ Returns:
290
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
291
+ """
292
+ if already_has_special_tokens:
293
+ return super().get_special_tokens_mask(
294
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
295
+ )
296
+
297
+ if token_ids_1 is None:
298
+ return [1] + ([0] * len(token_ids_0)) + [1]
299
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
300
+
301
+ def create_token_type_ids_from_sequences(
302
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
303
+ ) -> List[int]:
304
+ """
305
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
306
+ sequence pair mask has the following format:
307
+
308
+ ```
309
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
310
+ | first sequence | second sequence |
311
+ ```
312
+
313
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
314
+
315
+ Args:
316
+ token_ids_0 (`List[int]`):
317
+ List of IDs.
318
+ token_ids_1 (`List[int]`, *optional*):
319
+ Optional second list of IDs for sequence pairs.
320
+
321
+ Returns:
322
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
323
+ """
324
+ sep = [self.sep_token_id]
325
+ cls = [self.cls_token_id]
326
+
327
+ if token_ids_1 is None:
328
+ return len(cls + token_ids_0 + sep) * [0]
329
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
330
+
331
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
332
+ def _tokenize(self, text):
333
+ """Tokenize a string."""
334
+ bpe_tokens = []
335
+ for token in re.findall(self.pat, text):
336
+ token = "".join(
337
+ self.byte_encoder[b] for b in token.encode("utf-8")
338
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
339
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
340
+ return bpe_tokens
341
+
342
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
343
+ def _convert_token_to_id(self, token):
344
+ """Converts a token (str) in an id using the vocab."""
345
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
346
+
347
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
348
+ def _convert_id_to_token(self, index):
349
+ """Converts an index (integer) in a token (str) using the vocab."""
350
+ return self.decoder.get(index)
351
+
352
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
353
+ def convert_tokens_to_string(self, tokens):
354
+ """Converts a sequence of tokens (string) in a single string."""
355
+ text = "".join(tokens)
356
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
357
+ return text
358
+
359
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
360
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
361
+ if not os.path.isdir(save_directory):
362
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
363
+ return
364
+ vocab_file = os.path.join(
365
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
366
+ )
367
+ merge_file = os.path.join(
368
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
369
+ )
370
+
371
+ with open(vocab_file, "w", encoding="utf-8") as f:
372
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
373
+
374
+ index = 0
375
+ with open(merge_file, "w", encoding="utf-8") as writer:
376
+ writer.write("#version: 0.2\n")
377
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
378
+ if index != token_index:
379
+ logger.warning(
380
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
381
+ " Please check that the tokenizer is not corrupted!"
382
+ )
383
+ index = token_index
384
+ writer.write(" ".join(bpe_tokens) + "\n")
385
+ index += 1
386
+
387
+ return vocab_file, merge_file
388
+
389
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
390
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
391
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
392
+ text = " " + text
393
+ return (text, kwargs)
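Putting the slow tokenizer together (commentary, not part of the diff): a sketch of encoding a sentence pair with the `microsoft/deberta-base` checkpoint referenced in the class docstring. Per `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` above, the ids are laid out as `[CLS] A [SEP] B [SEP]`, with segment ids 0 over the first part and 1 over the second; the exact token ids depend on the checkpoint's vocabulary.

```python
from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")

encoded = tokenizer("Is DeBERTa fast?", "It is reasonably fast.")
print(encoded["input_ids"])       # [CLS] question [SEP] answer [SEP]
print(encoded["token_type_ids"])  # 0s over the first segment, 1s over the second

# 1 marks the special tokens the tokenizer would add around a single sequence.
ids = tokenizer.encode("Is DeBERTa fast?", add_special_tokens=False)
print(tokenizer.get_special_tokens_mask(ids))
```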
llmeval-env/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py ADDED
@@ -0,0 +1,247 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Fast Tokenization class for model DeBERTa."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import pre_tokenizers
21
+
22
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import logging
25
+ from .tokenization_deberta import DebertaTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+
33
+ class DebertaTokenizerFast(PreTrainedTokenizerFast):
34
+ """
35
+ Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
36
+ Byte-Pair-Encoding.
37
+
38
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
39
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
40
+
41
+ ```python
42
+ >>> from transformers import DebertaTokenizerFast
43
+
44
+ >>> tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
45
+ >>> tokenizer("Hello world")["input_ids"]
46
+ [1, 31414, 232, 2]
47
+
48
+ >>> tokenizer(" Hello world")["input_ids"]
49
+ [1, 20920, 232, 2]
50
+ ```
51
+
52
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
53
+ the model was not pretrained this way, it might yield a decrease in performance.
54
+
55
+ <Tip>
56
+
57
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
58
+
59
+ </Tip>
60
+
61
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
62
+ refer to this superclass for more information regarding those methods.
63
+
64
+ Args:
65
+ vocab_file (`str`, *optional*):
66
+ Path to the vocabulary file.
67
+ merges_file (`str`, *optional*):
68
+ Path to the merges file.
69
+ tokenizer_file (`str`, *optional*):
70
+ The path to a tokenizer file to use instead of the vocab file.
71
+ errors (`str`, *optional*, defaults to `"replace"`):
72
+ Paradigm to follow when decoding bytes to UTF-8. See
73
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
74
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
75
+ The beginning of sequence token.
76
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ The end of sequence token.
78
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
79
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
80
+ sequence classification or for a text and a question for question answering. It is also used as the last
81
+ token of a sequence built with special tokens.
82
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
83
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
84
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
85
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
86
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
87
+ token instead.
88
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
89
+ The token used for padding, for example when batching sequences of different lengths.
90
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
91
+ The token used for masking values. This is the token used when training this model with masked language
92
+ modeling. This is the token which the model will try to predict.
93
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
94
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
95
+ other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space).
96
+ """
97
+
98
+ vocab_files_names = VOCAB_FILES_NAMES
99
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
100
+ slow_tokenizer_class = DebertaTokenizer
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_file=None,
105
+ merges_file=None,
106
+ tokenizer_file=None,
107
+ errors="replace",
108
+ bos_token="[CLS]",
109
+ eos_token="[SEP]",
110
+ sep_token="[SEP]",
111
+ cls_token="[CLS]",
112
+ unk_token="[UNK]",
113
+ pad_token="[PAD]",
114
+ mask_token="[MASK]",
115
+ add_prefix_space=False,
116
+ **kwargs,
117
+ ):
118
+ super().__init__(
119
+ vocab_file,
120
+ merges_file,
121
+ tokenizer_file=tokenizer_file,
122
+ errors=errors,
123
+ bos_token=bos_token,
124
+ eos_token=eos_token,
125
+ unk_token=unk_token,
126
+ sep_token=sep_token,
127
+ cls_token=cls_token,
128
+ pad_token=pad_token,
129
+ mask_token=mask_token,
130
+ add_prefix_space=add_prefix_space,
131
+ **kwargs,
132
+ )
133
+ self.add_bos_token = kwargs.pop("add_bos_token", False)
134
+
135
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
136
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
137
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
138
+ pre_tok_state["add_prefix_space"] = add_prefix_space
139
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
140
+
141
+ self.add_prefix_space = add_prefix_space
142
+
143
+ @property
144
+ def mask_token(self) -> str:
145
+ """
146
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
147
+ having been set.
148
+
149
+ Deberta tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will greedily
150
+ comprise the space before the *[MASK]*.
151
+ """
152
+ if self._mask_token is None:
153
+ if self.verbose:
154
+ logger.error("Using mask_token, but it is not set yet.")
155
+ return None
156
+ return str(self._mask_token)
157
+
158
+ @mask_token.setter
159
+ def mask_token(self, value):
160
+ """
161
+ Overriding the default behavior of the mask token to have it eat the space before it.
162
+ """
163
+ # Mask token behaves like a normal word, i.e. includes the space before it
164
+ # So we set lstrip to True
165
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
166
+ self._mask_token = value
167
+
168
+ def build_inputs_with_special_tokens(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
170
+ ) -> List[int]:
171
+ """
172
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
173
+ adding special tokens. A DeBERTa sequence has the following format:
174
+
175
+ - single sequence: [CLS] X [SEP]
176
+ - pair of sequences: [CLS] A [SEP] B [SEP]
177
+
178
+ Args:
179
+ token_ids_0 (`List[int]`):
180
+ List of IDs to which the special tokens will be added.
181
+ token_ids_1 (`List[int]`, *optional*):
182
+ Optional second list of IDs for sequence pairs.
183
+
184
+ Returns:
185
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
186
+ """
187
+ if token_ids_1 is None:
188
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
189
+ cls = [self.cls_token_id]
190
+ sep = [self.sep_token_id]
191
+ return cls + token_ids_0 + sep + token_ids_1 + sep
192
+
193
+ def create_token_type_ids_from_sequences(
194
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
195
+ ) -> List[int]:
196
+ """
197
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
198
+ sequence pair mask has the following format:
199
+
200
+ ```
201
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
202
+ | first sequence | second sequence |
203
+ ```
204
+
205
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
206
+
207
+ Args:
208
+ token_ids_0 (`List[int]`):
209
+ List of IDs.
210
+ token_ids_1 (`List[int]`, *optional*):
211
+ Optional second list of IDs for sequence pairs.
212
+
213
+ Returns:
214
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
215
+ """
216
+ sep = [self.sep_token_id]
217
+ cls = [self.cls_token_id]
218
+
219
+ if token_ids_1 is None:
220
+ return len(cls + token_ids_0 + sep) * [0]
221
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
222
+
223
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._batch_encode_plus
224
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
225
+ is_split_into_words = kwargs.get("is_split_into_words", False)
226
+ assert self.add_prefix_space or not is_split_into_words, (
227
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
228
+ "to use it with pretokenized inputs."
229
+ )
230
+
231
+ return super()._batch_encode_plus(*args, **kwargs)
232
+
233
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._encode_plus
234
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
235
+ is_split_into_words = kwargs.get("is_split_into_words", False)
236
+
237
+ assert self.add_prefix_space or not is_split_into_words, (
238
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
239
+ "to use it with pretokenized inputs."
240
+ )
241
+
242
+ return super()._encode_plus(*args, **kwargs)
243
+
244
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
245
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
246
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
247
+ return tuple(files)
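A short sketch of the `add_prefix_space` constraint enforced in `_encode_plus`/`_batch_encode_plus` above (commentary, not part of the diff): pretokenized input is only accepted when the fast tokenizer is instantiated with `add_prefix_space=True`, so that every word receives the leading-space marker the byte-level vocabulary expects. The checkpoint is the one used in the class docstring.

```python
from transformers import DebertaTokenizerFast

# Default instantiation: fine for raw strings.
tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
print(tokenizer("Hello world")["input_ids"])

# For is_split_into_words=True, re-instantiate with add_prefix_space=True,
# otherwise the assertion in _encode_plus above is triggered.
tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base", add_prefix_space=True)
encoded = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoded.tokens())
```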
llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mask2former": [
21
+ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "Mask2FormerConfig",
23
+ ],
24
+ }
25
+
26
+ try:
27
+ if not is_vision_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_mask2former"] = [
41
+ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
42
+ "Mask2FormerForUniversalSegmentation",
43
+ "Mask2FormerModel",
44
+ "Mask2FormerPreTrainedModel",
45
+ ]
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
49
+
50
+ try:
51
+ if not is_vision_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .image_processing_mask2former import Mask2FormerImageProcessor
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_mask2former import (
65
+ MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
66
+ Mask2FormerForUniversalSegmentation,
67
+ Mask2FormerModel,
68
+ Mask2FormerPreTrainedModel,
69
+ )
70
+
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
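For context on the `_LazyModule` wiring above (commentary, not part of the diff): names are importable from the subpackage immediately, but each backing module, and therefore its torch or vision dependency, is only loaded when the corresponding symbol is first accessed. A minimal sketch:

```python
# Sketch only: this import resolves lazily and pulls in just configuration_mask2former.
from transformers.models.mask2former import Mask2FormerConfig

config = Mask2FormerConfig()  # default configuration object
print(config.model_type)      # "mask2former"
```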
llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/configuration_mask2former.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/convert_mask2former_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (26.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/modeling_mask2former.cpython-310.pyc ADDED
Binary file (88.7 kB). View file