applied-ai-018 committed
Commit 49bd343 · verified · 1 Parent(s): 9d93d66

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/15.attention.dense.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/fp32.pt +3 -0
  6. lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-res.json +1 -0
  7. venv/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py +71 -0
  8. venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py +124 -0
  13. venv/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py +1008 -0
  14. venv/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py +173 -0
  15. venv/lib/python3.10/site-packages/transformers/models/deberta/__init__.py +120 -0
  16. venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py +193 -0
  23. venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py +1426 -0
  24. venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py +1644 -0
  25. venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py +393 -0
  26. venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py +247 -0
  27. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/encodec/__init__.py +65 -0
  31. venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py +193 -0
  37. venv/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py +365 -0
  38. venv/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py +206 -0
  39. venv/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py +810 -0
  40. venv/lib/python3.10/site-packages/transformers/models/glpn/__init__.py +75 -0
  41. venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py +135 -0
  47. venv/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py +219 -0
  48. venv/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py +33 -0
  49. venv/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py +233 -0
  50. venv/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py +778 -0
ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f517a90b182dfc13198c7d4a24cd63a138b5968d72944605c412ec085c518404
+ size 16778396
ckpts/universal/global_step20/zero/15.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a29804e6c56792e55194ac2972b5a00e29fb4d871134bec165ec9b52f799f10d
+ size 16778317
ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ea36382d054b1f217f5323818c4507bacf660a2291c6cd0ecf71fc601e14084
+ size 33555612
ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:624d3ec33cde3729327c8cdd4fad871e8dd3fc3369fff103d89f9b31d88132e9
+ size 33555627
ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03a1643a820a072efeccf06e8a7b4deba3f7da62e083f5db59304f9370940ebb
+ size 50332749
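The five `.pt` entries above are Git LFS pointer files rather than the tensors themselves: each records only the pointer spec version, a SHA-256 object id, and the payload size in bytes. A minimal sketch for reading such a pointer from a local checkout (the helper below is illustrative, not part of this commit's tooling):

```python
from pathlib import Path


def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


# Hypothetical local checkout of this repo:
# read_lfs_pointer("ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg.pt")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:f517a9...", "size": "16778396"}
```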
lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_2dm": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_2dm": 0}}
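The fixture above pins the expected lm-evaluation-harness output for the `arithmetic_2dm` task (accuracy and its standard error) at task version 0. A quick sanity check of such a fixture, assuming a checkout with the same layout:

```python
import json

with open("lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-res.json") as f:
    expected = json.load(f)

# The harness stores per-task metrics under "results" and task versions under "versions".
assert expected["versions"]["arithmetic_2dm"] == 0
assert 0.0 <= expected["results"]["arithmetic_2dm"]["acc"] <= 1.0
```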
venv/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+ _import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]}
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_bert_generation"] = [
+         "BertGenerationDecoder",
+         "BertGenerationEncoder",
+         "BertGenerationPreTrainedModel",
+         "load_tf_weights_in_bert_generation",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_bert_generation import BertGenerationConfig
+
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_bert_generation import BertGenerationTokenizer
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_bert_generation import (
+             BertGenerationDecoder,
+             BertGenerationEncoder,
+             BertGenerationPreTrainedModel,
+             load_tf_weights_in_bert_generation,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
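This `__init__.py` follows the transformers lazy-import pattern: `_import_structure` lists the public names, the tokenizer and model entries are registered only when sentencepiece / torch are importable, and `_LazyModule` defers the actual imports until first attribute access. A usage sketch, assuming both optional dependencies are installed and the checkpoint referenced in the docstrings is reachable on the Hub:

```python
from transformers import (
    BertGenerationConfig,
    BertGenerationEncoder,
    BertGenerationTokenizer,
)

config = BertGenerationConfig()        # defaults from configuration_bert_generation.py
model = BertGenerationEncoder(config)  # randomly initialized encoder
tokenizer = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
```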
venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc ADDED
Binary file (5.66 kB).
venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc ADDED
Binary file (31.7 kB).
venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc ADDED
Binary file (6.89 kB).
venv/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py ADDED
@@ -0,0 +1,124 @@
+ # coding=utf-8
+ # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ BertGeneration model configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+
+
+ class BertGenerationConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
+     instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
+     Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
+     [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
+     architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 50358):
+             Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`BertGeneration`].
+         hidden_size (`int`, *optional*, defaults to 1024):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 24):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 4096):
+             Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         pad_token_id (`int`, *optional*, defaults to 0):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 2):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 1):
+             End of stream token id.
+         position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+             Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+             positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+             [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+             For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+             with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+
+     Examples:
+
+     ```python
+     >>> from transformers import BertGenerationConfig, BertGenerationEncoder
+
+     >>> # Initializing a BertGeneration config
+     >>> configuration = BertGenerationConfig()
+
+     >>> # Initializing a model (with random weights) from the config
+     >>> model = BertGenerationEncoder(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "bert-generation"
+
+     def __init__(
+         self,
+         vocab_size=50358,
+         hidden_size=1024,
+         num_hidden_layers=24,
+         num_attention_heads=16,
+         intermediate_size=4096,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.1,
+         attention_probs_dropout_prob=0.1,
+         max_position_embeddings=512,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         pad_token_id=0,
+         bos_token_id=2,
+         eos_token_id=1,
+         position_embedding_type="absolute",
+         use_cache=True,
+         **kwargs,
+     ):
+         super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.position_embedding_type = position_embedding_type
+         self.use_cache = use_cache
@@ -0,0 +1,1008 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch BERT model specific for generation."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import CrossEntropyLoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import (
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_bert_generation import BertGenerationConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder"
42
+ _CONFIG_FOR_DOC = "BertGenerationConfig"
43
+
44
+
45
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration
46
+ class BertGenerationSelfOutput(nn.Module):
47
+ def __init__(self, config):
48
+ super().__init__()
49
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
50
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
51
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
52
+
53
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
54
+ hidden_states = self.dense(hidden_states)
55
+ hidden_states = self.dropout(hidden_states)
56
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
57
+ return hidden_states
58
+
59
+
60
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
61
+ class BertGenerationSelfAttention(nn.Module):
62
+ def __init__(self, config, position_embedding_type=None):
63
+ super().__init__()
64
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
65
+ raise ValueError(
66
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
67
+ f"heads ({config.num_attention_heads})"
68
+ )
69
+
70
+ self.num_attention_heads = config.num_attention_heads
71
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
72
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
73
+
74
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
75
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
76
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
77
+
78
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
79
+ self.position_embedding_type = position_embedding_type or getattr(
80
+ config, "position_embedding_type", "absolute"
81
+ )
82
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
83
+ self.max_position_embeddings = config.max_position_embeddings
84
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
85
+
86
+ self.is_decoder = config.is_decoder
87
+
88
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
89
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
90
+ x = x.view(new_x_shape)
91
+ return x.permute(0, 2, 1, 3)
92
+
93
+ def forward(
94
+ self,
95
+ hidden_states: torch.Tensor,
96
+ attention_mask: Optional[torch.FloatTensor] = None,
97
+ head_mask: Optional[torch.FloatTensor] = None,
98
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
99
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
100
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
101
+ output_attentions: Optional[bool] = False,
102
+ ) -> Tuple[torch.Tensor]:
103
+ mixed_query_layer = self.query(hidden_states)
104
+
105
+ # If this is instantiated as a cross-attention module, the keys
106
+ # and values come from an encoder; the attention mask needs to be
107
+ # such that the encoder's padding tokens are not attended to.
108
+ is_cross_attention = encoder_hidden_states is not None
109
+
110
+ if is_cross_attention and past_key_value is not None:
111
+ # reuse k,v, cross_attentions
112
+ key_layer = past_key_value[0]
113
+ value_layer = past_key_value[1]
114
+ attention_mask = encoder_attention_mask
115
+ elif is_cross_attention:
116
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
117
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
118
+ attention_mask = encoder_attention_mask
119
+ elif past_key_value is not None:
120
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
121
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
122
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
123
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
124
+ else:
125
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
126
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
127
+
128
+ query_layer = self.transpose_for_scores(mixed_query_layer)
129
+
130
+ use_cache = past_key_value is not None
131
+ if self.is_decoder:
132
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
133
+ # Further calls to cross_attention layer can then reuse all cross-attention
134
+ # key/value_states (first "if" case)
135
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
136
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
137
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
138
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
139
+ past_key_value = (key_layer, value_layer)
140
+
141
+ # Take the dot product between "query" and "key" to get the raw attention scores.
142
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
143
+
144
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
145
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
146
+ if use_cache:
147
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
148
+ -1, 1
149
+ )
150
+ else:
151
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
152
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
153
+ distance = position_ids_l - position_ids_r
154
+
155
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
156
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
157
+
158
+ if self.position_embedding_type == "relative_key":
159
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
160
+ attention_scores = attention_scores + relative_position_scores
161
+ elif self.position_embedding_type == "relative_key_query":
162
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
163
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
164
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
165
+
166
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
167
+ if attention_mask is not None:
168
+ # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function)
169
+ attention_scores = attention_scores + attention_mask
170
+
171
+ # Normalize the attention scores to probabilities.
172
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
173
+
174
+ # This is actually dropping out entire tokens to attend to, which might
175
+ # seem a bit unusual, but is taken from the original Transformer paper.
176
+ attention_probs = self.dropout(attention_probs)
177
+
178
+ # Mask heads if we want to
179
+ if head_mask is not None:
180
+ attention_probs = attention_probs * head_mask
181
+
182
+ context_layer = torch.matmul(attention_probs, value_layer)
183
+
184
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
185
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
186
+ context_layer = context_layer.view(new_context_layer_shape)
187
+
188
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
189
+
190
+ if self.is_decoder:
191
+ outputs = outputs + (past_key_value,)
192
+ return outputs
193
+
194
+
195
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration
196
+ class BertGenerationAttention(nn.Module):
197
+ def __init__(self, config, position_embedding_type=None):
198
+ super().__init__()
199
+ self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type)
200
+ self.output = BertGenerationSelfOutput(config)
201
+ self.pruned_heads = set()
202
+
203
+ def prune_heads(self, heads):
204
+ if len(heads) == 0:
205
+ return
206
+ heads, index = find_pruneable_heads_and_indices(
207
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
208
+ )
209
+
210
+ # Prune linear layers
211
+ self.self.query = prune_linear_layer(self.self.query, index)
212
+ self.self.key = prune_linear_layer(self.self.key, index)
213
+ self.self.value = prune_linear_layer(self.self.value, index)
214
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
215
+
216
+ # Update hyper params and store pruned heads
217
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
218
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
219
+ self.pruned_heads = self.pruned_heads.union(heads)
220
+
221
+ def forward(
222
+ self,
223
+ hidden_states: torch.Tensor,
224
+ attention_mask: Optional[torch.FloatTensor] = None,
225
+ head_mask: Optional[torch.FloatTensor] = None,
226
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
227
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
228
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
229
+ output_attentions: Optional[bool] = False,
230
+ ) -> Tuple[torch.Tensor]:
231
+ self_outputs = self.self(
232
+ hidden_states,
233
+ attention_mask,
234
+ head_mask,
235
+ encoder_hidden_states,
236
+ encoder_attention_mask,
237
+ past_key_value,
238
+ output_attentions,
239
+ )
240
+ attention_output = self.output(self_outputs[0], hidden_states)
241
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
242
+ return outputs
243
+
244
+
245
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration
246
+ class BertGenerationIntermediate(nn.Module):
247
+ def __init__(self, config):
248
+ super().__init__()
249
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
250
+ if isinstance(config.hidden_act, str):
251
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
252
+ else:
253
+ self.intermediate_act_fn = config.hidden_act
254
+
255
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
256
+ hidden_states = self.dense(hidden_states)
257
+ hidden_states = self.intermediate_act_fn(hidden_states)
258
+ return hidden_states
259
+
260
+
261
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration
262
+ class BertGenerationOutput(nn.Module):
263
+ def __init__(self, config):
264
+ super().__init__()
265
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
266
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
267
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
268
+
269
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
270
+ hidden_states = self.dense(hidden_states)
271
+ hidden_states = self.dropout(hidden_states)
272
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
273
+ return hidden_states
274
+
275
+
276
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
277
+ class BertGenerationLayer(nn.Module):
278
+ def __init__(self, config):
279
+ super().__init__()
280
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
281
+ self.seq_len_dim = 1
282
+ self.attention = BertGenerationAttention(config)
283
+ self.is_decoder = config.is_decoder
284
+ self.add_cross_attention = config.add_cross_attention
285
+ if self.add_cross_attention:
286
+ if not self.is_decoder:
287
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
288
+ self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute")
289
+ self.intermediate = BertGenerationIntermediate(config)
290
+ self.output = BertGenerationOutput(config)
291
+
292
+ def forward(
293
+ self,
294
+ hidden_states: torch.Tensor,
295
+ attention_mask: Optional[torch.FloatTensor] = None,
296
+ head_mask: Optional[torch.FloatTensor] = None,
297
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
298
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
299
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
300
+ output_attentions: Optional[bool] = False,
301
+ ) -> Tuple[torch.Tensor]:
302
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
303
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
304
+ self_attention_outputs = self.attention(
305
+ hidden_states,
306
+ attention_mask,
307
+ head_mask,
308
+ output_attentions=output_attentions,
309
+ past_key_value=self_attn_past_key_value,
310
+ )
311
+ attention_output = self_attention_outputs[0]
312
+
313
+ # if decoder, the last output is tuple of self-attn cache
314
+ if self.is_decoder:
315
+ outputs = self_attention_outputs[1:-1]
316
+ present_key_value = self_attention_outputs[-1]
317
+ else:
318
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
319
+
320
+ cross_attn_present_key_value = None
321
+ if self.is_decoder and encoder_hidden_states is not None:
322
+ if not hasattr(self, "crossattention"):
323
+ raise ValueError(
324
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
325
+ " by setting `config.add_cross_attention=True`"
326
+ )
327
+
328
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
329
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
330
+ cross_attention_outputs = self.crossattention(
331
+ attention_output,
332
+ attention_mask,
333
+ head_mask,
334
+ encoder_hidden_states,
335
+ encoder_attention_mask,
336
+ cross_attn_past_key_value,
337
+ output_attentions,
338
+ )
339
+ attention_output = cross_attention_outputs[0]
340
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
341
+
342
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
343
+ cross_attn_present_key_value = cross_attention_outputs[-1]
344
+ present_key_value = present_key_value + cross_attn_present_key_value
345
+
346
+ layer_output = apply_chunking_to_forward(
347
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
348
+ )
349
+ outputs = (layer_output,) + outputs
350
+
351
+ # if decoder, return the attn key/values as the last output
352
+ if self.is_decoder:
353
+ outputs = outputs + (present_key_value,)
354
+
355
+ return outputs
356
+
357
+ def feed_forward_chunk(self, attention_output):
358
+ intermediate_output = self.intermediate(attention_output)
359
+ layer_output = self.output(intermediate_output, attention_output)
360
+ return layer_output
361
+
362
+
363
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration
364
+ class BertEncoder(nn.Module):
365
+ def __init__(self, config):
366
+ super().__init__()
367
+ self.config = config
368
+ self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)])
369
+ self.gradient_checkpointing = False
370
+
371
+ def forward(
372
+ self,
373
+ hidden_states: torch.Tensor,
374
+ attention_mask: Optional[torch.FloatTensor] = None,
375
+ head_mask: Optional[torch.FloatTensor] = None,
376
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
377
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
378
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
379
+ use_cache: Optional[bool] = None,
380
+ output_attentions: Optional[bool] = False,
381
+ output_hidden_states: Optional[bool] = False,
382
+ return_dict: Optional[bool] = True,
383
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
384
+ all_hidden_states = () if output_hidden_states else None
385
+ all_self_attentions = () if output_attentions else None
386
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
387
+
388
+ if self.gradient_checkpointing and self.training:
389
+ if use_cache:
390
+ logger.warning_once(
391
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
392
+ )
393
+ use_cache = False
394
+
395
+ next_decoder_cache = () if use_cache else None
396
+ for i, layer_module in enumerate(self.layer):
397
+ if output_hidden_states:
398
+ all_hidden_states = all_hidden_states + (hidden_states,)
399
+
400
+ layer_head_mask = head_mask[i] if head_mask is not None else None
401
+ past_key_value = past_key_values[i] if past_key_values is not None else None
402
+
403
+ if self.gradient_checkpointing and self.training:
404
+ layer_outputs = self._gradient_checkpointing_func(
405
+ layer_module.__call__,
406
+ hidden_states,
407
+ attention_mask,
408
+ layer_head_mask,
409
+ encoder_hidden_states,
410
+ encoder_attention_mask,
411
+ past_key_value,
412
+ output_attentions,
413
+ )
414
+ else:
415
+ layer_outputs = layer_module(
416
+ hidden_states,
417
+ attention_mask,
418
+ layer_head_mask,
419
+ encoder_hidden_states,
420
+ encoder_attention_mask,
421
+ past_key_value,
422
+ output_attentions,
423
+ )
424
+
425
+ hidden_states = layer_outputs[0]
426
+ if use_cache:
427
+ next_decoder_cache += (layer_outputs[-1],)
428
+ if output_attentions:
429
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
430
+ if self.config.add_cross_attention:
431
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
432
+
433
+ if output_hidden_states:
434
+ all_hidden_states = all_hidden_states + (hidden_states,)
435
+
436
+ if not return_dict:
437
+ return tuple(
438
+ v
439
+ for v in [
440
+ hidden_states,
441
+ next_decoder_cache,
442
+ all_hidden_states,
443
+ all_self_attentions,
444
+ all_cross_attentions,
445
+ ]
446
+ if v is not None
447
+ )
448
+ return BaseModelOutputWithPastAndCrossAttentions(
449
+ last_hidden_state=hidden_states,
450
+ past_key_values=next_decoder_cache,
451
+ hidden_states=all_hidden_states,
452
+ attentions=all_self_attentions,
453
+ cross_attentions=all_cross_attentions,
454
+ )
455
+
456
+
457
+ def load_tf_weights_in_bert_generation(
458
+ model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
459
+ ):
460
+ try:
461
+ import numpy as np
462
+ import tensorflow.compat.v1 as tf
463
+ import tensorflow_hub as hub
464
+ import tensorflow_text # noqa: F401
465
+
466
+ tf.disable_eager_execution()
467
+ except ImportError:
468
+ logger.error(
469
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
470
+ "https://www.tensorflow.org/install/ for installation instructions."
471
+ )
472
+ raise
473
+ tf_model = hub.Module(tf_hub_path)
474
+ init = tf.global_variables_initializer()
475
+ with tf.Session() as sess:
476
+ init.run()
477
+ all_variables = tf_model.variable_map
478
+ keep_track_variables = all_variables.copy()
479
+ for key in list(all_variables.keys()):
480
+ if "global" in key:
481
+ logger.info(f"Skipping {key}...")
482
+ continue
483
+ if not is_encoder:
484
+ model_pointer = getattr(model, model_class)
485
+ else:
486
+ model_pointer = model
487
+ is_embedding = False
488
+ logger.info(f"Trying to match {key}...")
489
+ # remove start_string = "module/bert/"
490
+ sub_layers = key.split("/")[2:]
491
+ if is_encoder_named_decoder and sub_layers[0] == "encoder":
492
+ logger.info(f"Skipping encoder layer {key} for decoder")
493
+ continue
494
+ if is_encoder and sub_layers[0] == "decoder":
495
+ logger.info(f"Skipping decoder layer {key} for encoder")
496
+ continue
497
+ for i, sub_layer in enumerate(sub_layers):
498
+ if sub_layer == "embeddings":
499
+ is_embedding = True
500
+ elif sub_layer == "LayerNorm":
501
+ is_embedding = False
502
+ if "layer" in sub_layer:
503
+ model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])]
504
+ elif sub_layer in ["kernel", "gamma"]:
505
+ model_pointer = model_pointer.weight
506
+ elif sub_layer == "beta":
507
+ model_pointer = model_pointer.bias
508
+ elif sub_layer == "encdec":
509
+ model_pointer = model_pointer.crossattention.self
510
+ elif sub_layer == "encdec_output":
511
+ model_pointer = model_pointer.crossattention.output
512
+ elif is_encoder_named_decoder and sub_layer == "decoder":
513
+ model_pointer = model_pointer.encoder
514
+ else:
515
+ if sub_layer == "attention" and "encdec" in sub_layers[i + 1]:
516
+ continue
517
+ try:
518
+ model_pointer = getattr(model_pointer, sub_layer)
519
+ except AttributeError:
520
+ logger.info(f"Skipping to initialize {key} at {sub_layer}...")
521
+ raise AttributeError
522
+
523
+ array = np.asarray(sess.run(all_variables[key]))
524
+ if not is_embedding:
525
+ logger.info(f"Transposing numpy weight of shape {array.shape} for {key}")
526
+ array = np.transpose(array)
527
+ else:
528
+ model_pointer = model_pointer.weight
529
+
530
+ if model_pointer.shape != array.shape:
531
+ raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
532
+ logger.info(f"Initialize PyTorch weight {key}")
533
+
534
+ model_pointer.data = torch.from_numpy(array.astype(np.float32))
535
+ keep_track_variables.pop(key, None)
536
+
537
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}")
538
+ return model
539
+
540
+
541
+ class BertGenerationEmbeddings(nn.Module):
542
+ """Construct the embeddings from word and position embeddings."""
543
+
544
+ def __init__(self, config):
545
+ super().__init__()
546
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
547
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
548
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
549
+ # any TensorFlow checkpoint file
550
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
551
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
552
+
553
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
554
+ self.register_buffer(
555
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
556
+ )
557
+
558
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
559
+ if input_ids is not None:
560
+ input_shape = input_ids.size()
561
+ else:
562
+ input_shape = inputs_embeds.size()[:-1]
563
+
564
+ seq_length = input_shape[1]
565
+
566
+ if position_ids is None:
567
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
568
+
569
+ if inputs_embeds is None:
570
+ inputs_embeds = self.word_embeddings(input_ids)
571
+ position_embeddings = self.position_embeddings(position_ids)
572
+
573
+ embeddings = inputs_embeds + position_embeddings
574
+ embeddings = self.LayerNorm(embeddings)
575
+ embeddings = self.dropout(embeddings)
576
+ return embeddings
577
+
578
+
579
+ class BertGenerationPreTrainedModel(PreTrainedModel):
580
+ """
581
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
582
+ models.
583
+ """
584
+
585
+ config_class = BertGenerationConfig
586
+ base_model_prefix = "bert"
587
+ supports_gradient_checkpointing = True
588
+
589
+ def _init_weights(self, module):
590
+ """Initialize the weights"""
591
+ if isinstance(module, nn.Linear):
592
+ # Slightly different from the TF version which uses truncated_normal for initialization
593
+ # cf https://github.com/pytorch/pytorch/pull/5617
594
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
595
+ if module.bias is not None:
596
+ module.bias.data.zero_()
597
+ elif isinstance(module, nn.Embedding):
598
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
599
+ if module.padding_idx is not None:
600
+ module.weight.data[module.padding_idx].zero_()
601
+ elif isinstance(module, nn.LayerNorm):
602
+ module.bias.data.zero_()
603
+ module.weight.data.fill_(1.0)
604
+
605
+
606
+ BERT_GENERATION_START_DOCSTRING = r"""
607
+
608
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
609
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
610
+ etc.)
611
+
612
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
613
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
614
+ and behavior.
615
+
616
+ Parameters:
617
+ config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model.
618
+ Initializing with a config file does not load the weights associated with the model, only the
619
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
620
+ """
621
+
622
+ BERT_GENERATION_INPUTS_DOCSTRING = r"""
623
+ Args:
624
+ input_ids (`torch.LongTensor` of shape `({0})`):
625
+ Indices of input sequence tokens in the vocabulary.
626
+
627
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
628
+ [`PreTrainedTokenizer.encode`] for details.
629
+
630
+ [What are input IDs?](../glossary#input-ids)
631
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
632
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ [What are attention masks?](../glossary#attention-mask)
638
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
639
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
640
+ config.max_position_embeddings - 1]`.
641
+
642
+ [What are position IDs?](../glossary#position-ids)
643
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
644
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
645
+
646
+ - 1 indicates the head is **not masked**,
647
+ - 0 indicates the head is **masked**.
648
+
649
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
650
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
651
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
652
+ model's internal embedding lookup matrix.
653
+ output_attentions (`bool`, *optional*):
654
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
655
+ tensors for more detail.
656
+ output_hidden_states (`bool`, *optional*):
657
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
658
+ more detail.
659
+ return_dict (`bool`, *optional*):
660
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
661
+ """
662
+
663
+
664
+ @add_start_docstrings(
665
+ "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
666
+ BERT_GENERATION_START_DOCSTRING,
667
+ )
668
+ class BertGenerationEncoder(BertGenerationPreTrainedModel):
669
+ """
670
+
671
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
672
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
673
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
674
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
675
+
676
+ This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as
677
+ described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461)
678
+ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
679
+
680
+ To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
681
+ to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
682
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
683
+ """
684
+
685
+ def __init__(self, config):
686
+ super().__init__(config)
687
+ self.config = config
688
+
689
+ self.embeddings = BertGenerationEmbeddings(config)
690
+ self.encoder = BertEncoder(config)
691
+
692
+ # Initialize weights and apply final processing
693
+ self.post_init()
694
+
695
+ def get_input_embeddings(self):
696
+ return self.embeddings.word_embeddings
697
+
698
+ def set_input_embeddings(self, value):
699
+ self.embeddings.word_embeddings = value
700
+
701
+ def _prune_heads(self, heads_to_prune):
702
+ """
703
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
704
+ class PreTrainedModel
705
+ """
706
+ for layer, heads in heads_to_prune.items():
707
+ self.encoder.layer[layer].attention.prune_heads(heads)
708
+
709
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
710
+ @add_code_sample_docstrings(
711
+ checkpoint=_CHECKPOINT_FOR_DOC,
712
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
713
+ config_class=_CONFIG_FOR_DOC,
714
+ )
715
+ def forward(
716
+ self,
717
+ input_ids: Optional[torch.Tensor] = None,
718
+ attention_mask: Optional[torch.Tensor] = None,
719
+ position_ids: Optional[torch.Tensor] = None,
720
+ head_mask: Optional[torch.Tensor] = None,
721
+ inputs_embeds: Optional[torch.Tensor] = None,
722
+ encoder_hidden_states: Optional[torch.Tensor] = None,
723
+ encoder_attention_mask: Optional[torch.Tensor] = None,
724
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
725
+ use_cache: Optional[bool] = None,
726
+ output_attentions: Optional[bool] = None,
727
+ output_hidden_states: Optional[bool] = None,
728
+ return_dict: Optional[bool] = None,
729
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
730
+ r"""
731
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
732
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
733
+ the model is configured as a decoder.
734
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
735
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
736
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for
737
+ tokens that are NOT MASKED, `0` for MASKED tokens.
738
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
739
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
740
+
741
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
742
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
743
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
744
+ use_cache (`bool`, *optional*):
745
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
746
+ `past_key_values`).
747
+ """
748
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
749
+ output_hidden_states = (
750
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
751
+ )
752
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
753
+
754
+ if self.config.is_decoder:
755
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
756
+ else:
757
+ use_cache = False
758
+
759
+ if input_ids is not None and inputs_embeds is not None:
760
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
761
+ elif input_ids is not None:
762
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
763
+ input_shape = input_ids.size()
764
+ elif inputs_embeds is not None:
765
+ input_shape = inputs_embeds.size()[:-1]
766
+ else:
767
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
768
+
769
+ batch_size, seq_length = input_shape
770
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
771
+
772
+ # past_key_values_length
773
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
774
+
775
+ if attention_mask is None:
776
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
777
+
778
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
779
+ # ourselves in which case we just need to make it broadcastable to all heads.
780
+ extended_attention_mask = None
781
+ if not use_cache:
782
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
783
+
784
+ # If a 2D or 3D attention mask is provided for the cross-attention
785
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
786
+ if self.config.is_decoder and encoder_hidden_states is not None:
787
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
788
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
789
+ if encoder_attention_mask is None:
790
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
791
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
792
+ else:
793
+ encoder_extended_attention_mask = None
794
+
795
+ # Prepare head mask if needed
796
+ # 1.0 in head_mask indicate we keep the head
797
+ # attention_probs has shape bsz x n_heads x N x N
798
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
799
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
800
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
801
+
802
+ embedding_output = self.embeddings(
803
+ input_ids=input_ids,
804
+ position_ids=position_ids,
805
+ inputs_embeds=inputs_embeds,
806
+ past_key_values_length=past_key_values_length,
807
+ )
808
+
809
+ encoder_outputs = self.encoder(
810
+ embedding_output,
811
+ attention_mask=extended_attention_mask,
812
+ head_mask=head_mask,
813
+ encoder_hidden_states=encoder_hidden_states,
814
+ encoder_attention_mask=encoder_extended_attention_mask,
815
+ past_key_values=past_key_values,
816
+ use_cache=use_cache,
817
+ output_attentions=output_attentions,
818
+ output_hidden_states=output_hidden_states,
819
+ return_dict=return_dict,
820
+ )
821
+ sequence_output = encoder_outputs[0]
822
+
823
+ if not return_dict:
824
+ return (sequence_output,) + encoder_outputs[1:]
825
+
826
+ return BaseModelOutputWithPastAndCrossAttentions(
827
+ last_hidden_state=sequence_output,
828
+ past_key_values=encoder_outputs.past_key_values,
829
+ hidden_states=encoder_outputs.hidden_states,
830
+ attentions=encoder_outputs.attentions,
831
+ cross_attentions=encoder_outputs.cross_attentions,
832
+ )
833
+
834
+
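A minimal sketch (not part of the diff above) of how the encoder sizes its default attention mask once a key/value cache is in play, mirroring the `past_key_values_length` handling above; the batch size and cache length are toy assumptions.

```python
import torch

batch_size, seq_length = 2, 1       # only the newest token is fed once a cache exists
past_key_values_length = 7          # positions already stored in past_key_values

# Mirrors `torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)` above
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length))
print(attention_mask.shape)         # torch.Size([2, 8]): covers cached positions plus the new token
```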
835
+ class BertGenerationOnlyLMHead(nn.Module):
836
+ def __init__(self, config):
837
+ super().__init__()
838
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
839
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
840
+ self.decoder.bias = self.bias
841
+
842
+ def forward(self, hidden_states):
843
+ logits = self.decoder(hidden_states)
844
+ return logits
845
+
846
+ def _tie_weights(self):
847
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
848
+ self.bias = self.decoder.bias
849
+
850
+
851
+ @add_start_docstrings(
852
+ """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""",
853
+ BERT_GENERATION_START_DOCSTRING,
854
+ )
855
+ class BertGenerationDecoder(BertGenerationPreTrainedModel):
856
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
857
+
858
+ def __init__(self, config):
859
+ super().__init__(config)
860
+
861
+ if not config.is_decoder:
862
+ logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True`.")
863
+
864
+ self.bert = BertGenerationEncoder(config)
865
+ self.lm_head = BertGenerationOnlyLMHead(config)
866
+
867
+ # Initialize weights and apply final processing
868
+ self.post_init()
869
+
870
+ def get_output_embeddings(self):
871
+ return self.lm_head.decoder
872
+
873
+ def set_output_embeddings(self, new_embeddings):
874
+ self.lm_head.decoder = new_embeddings
875
+
876
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
877
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
878
+ def forward(
879
+ self,
880
+ input_ids: Optional[torch.Tensor] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ position_ids: Optional[torch.Tensor] = None,
883
+ head_mask: Optional[torch.Tensor] = None,
884
+ inputs_embeds: Optional[torch.Tensor] = None,
885
+ encoder_hidden_states: Optional[torch.Tensor] = None,
886
+ encoder_attention_mask: Optional[torch.Tensor] = None,
887
+ labels: Optional[torch.Tensor] = None,
888
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
889
+ use_cache: Optional[bool] = None,
890
+ output_attentions: Optional[bool] = None,
891
+ output_hidden_states: Optional[bool] = None,
892
+ return_dict: Optional[bool] = None,
893
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
894
+ r"""
895
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
896
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
897
+ the model is configured as a decoder.
898
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
899
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
900
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
901
+
902
+ - 1 for tokens that are **not masked**,
903
+ - 0 for tokens that are **masked**.
904
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
905
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
906
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
907
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
908
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
909
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
910
+
911
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
912
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
913
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
914
+ use_cache (`bool`, *optional*):
915
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
916
+ `past_key_values`).
917
+
918
+ Returns:
919
+
920
+ Example:
921
+
922
+ ```python
923
+ >>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
924
+ >>> import torch
925
+
926
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
927
+ >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
928
+ >>> config.is_decoder = True
929
+ >>> model = BertGenerationDecoder.from_pretrained(
930
+ ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
931
+ ... )
932
+
933
+ >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
934
+ >>> outputs = model(**inputs)
935
+
936
+ >>> prediction_logits = outputs.logits
937
+ ```"""
938
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
939
+ if labels is not None:
940
+ use_cache = False
941
+
942
+ outputs = self.bert(
943
+ input_ids,
944
+ attention_mask=attention_mask,
945
+ position_ids=position_ids,
946
+ head_mask=head_mask,
947
+ inputs_embeds=inputs_embeds,
948
+ encoder_hidden_states=encoder_hidden_states,
949
+ encoder_attention_mask=encoder_attention_mask,
950
+ past_key_values=past_key_values,
951
+ use_cache=use_cache,
952
+ output_attentions=output_attentions,
953
+ output_hidden_states=output_hidden_states,
954
+ return_dict=return_dict,
955
+ )
956
+
957
+ sequence_output = outputs[0]
958
+ prediction_scores = self.lm_head(sequence_output)
959
+
960
+ lm_loss = None
961
+ if labels is not None:
962
+ # we are doing next-token prediction; shift prediction scores and input ids by one
963
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
964
+ labels = labels[:, 1:].contiguous()
965
+ loss_fct = CrossEntropyLoss()
966
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
967
+
968
+ if not return_dict:
969
+ output = (prediction_scores,) + outputs[1:]
970
+ return ((lm_loss,) + output) if lm_loss is not None else output
971
+
972
+ return CausalLMOutputWithCrossAttentions(
973
+ loss=lm_loss,
974
+ logits=prediction_scores,
975
+ past_key_values=outputs.past_key_values,
976
+ hidden_states=outputs.hidden_states,
977
+ attentions=outputs.attentions,
978
+ cross_attentions=outputs.cross_attentions,
979
+ )
980
+
981
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
982
+ input_shape = input_ids.shape
983
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
984
+ if attention_mask is None:
985
+ attention_mask = input_ids.new_ones(input_shape)
986
+
987
+ # cut decoder_input_ids if past_key_values is used
988
+ if past_key_values is not None:
989
+ past_length = past_key_values[0][0].shape[2]
990
+
991
+ # Some generation methods already pass only the last input ID
992
+ if input_ids.shape[1] > past_length:
993
+ remove_prefix_length = past_length
994
+ else:
995
+ # Default to old behavior: keep only final ID
996
+ remove_prefix_length = input_ids.shape[1] - 1
997
+
998
+ input_ids = input_ids[:, remove_prefix_length:]
999
+
1000
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1001
+
1002
+ def _reorder_cache(self, past_key_values, beam_idx):
1003
+ reordered_past = ()
1004
+ for layer_past in past_key_values:
1005
+ reordered_past += (
1006
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1007
+ )
1008
+ return reordered_past
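A hedged sketch of the cache-trimming rule in `prepare_inputs_for_generation` above, replayed on toy tensors; the token ids and cache length are made-up values, not real model inputs.

```python
import torch

input_ids = torch.tensor([[101, 7592, 2026, 3899, 2003]])   # (batch_size=1, seq_len=5), toy ids
past_length = 4                                             # key/value positions already cached

# Same branch logic as the method above: keep only the not-yet-cached suffix,
# falling back to the final token when generation already passes a single id.
if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    remove_prefix_length = input_ids.shape[1] - 1

print(input_ids[:, remove_prefix_length:])   # tensor([[2003]]): only the newest token is decoded
```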
venv/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py ADDED
@@ -0,0 +1,173 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model BertGeneration."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
31
+
32
+
33
+ class BertGenerationTokenizer(PreTrainedTokenizer):
34
+ """
35
+ Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
36
+
37
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
38
+ this superclass for more information regarding those methods.
39
+
40
+ Args:
41
+ vocab_file (`str`):
42
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
43
+ contains the vocabulary necessary to instantiate a tokenizer.
44
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
45
+ The beginning of sequence token.
46
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
47
+ The end of sequence token.
48
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
49
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
50
+ token instead.
51
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
52
+ The token used for padding, for example when batching sequences of different lengths.
53
+ sep_token (`str`, *optional*, defaults to `"<::::>"`):
54
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
55
+ sequence classification or for a text and a question for question answering. It is also used as the last
56
+ token of a sequence built with special tokens.
57
+ sp_model_kwargs (`dict`, *optional*):
58
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
59
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
60
+ to set:
61
+
62
+ - `enable_sampling`: Enable subword regularization.
63
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
64
+
65
+ - `nbest_size = {0,1}`: No sampling is performed.
66
+ - `nbest_size > 1`: samples from the nbest_size results.
67
+ - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
68
+ using the forward-filtering-and-backward-sampling algorithm.
69
+
70
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
71
+ BPE-dropout.
72
+ """
73
+
74
+ vocab_files_names = VOCAB_FILES_NAMES
75
+ prefix_tokens: List[int] = []
76
+ model_input_names = ["input_ids", "attention_mask"]
77
+
78
+ def __init__(
79
+ self,
80
+ vocab_file,
81
+ bos_token="<s>",
82
+ eos_token="</s>",
83
+ unk_token="<unk>",
84
+ pad_token="<pad>",
85
+ sep_token="<::::>",
86
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
87
+ **kwargs,
88
+ ) -> None:
89
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
90
+
91
+ self.vocab_file = vocab_file
92
+
93
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
94
+ self.sp_model.Load(vocab_file)
95
+
96
+ # Add extra_ids to the special token list
97
+ super().__init__(
98
+ bos_token=bos_token,
99
+ eos_token=eos_token,
100
+ unk_token=unk_token,
101
+ pad_token=pad_token,
102
+ sep_token=sep_token,
103
+ sp_model_kwargs=self.sp_model_kwargs,
104
+ **kwargs,
105
+ )
106
+
107
+ @property
108
+ def vocab_size(self):
109
+ return self.sp_model.get_piece_size()
110
+
111
+ def get_vocab(self):
112
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
113
+ vocab.update(self.added_tokens_encoder)
114
+ return vocab
115
+
116
+ def __getstate__(self):
117
+ state = self.__dict__.copy()
118
+ state["sp_model"] = None
119
+ return state
120
+
121
+ def __setstate__(self, d):
122
+ self.__dict__ = d
123
+
124
+ # for backward compatibility
125
+ if not hasattr(self, "sp_model_kwargs"):
126
+ self.sp_model_kwargs = {}
127
+
128
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
129
+ self.sp_model.Load(self.vocab_file)
130
+
131
+ def _tokenize(self, text: str) -> List[str]:
132
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
133
+ return self.sp_model.encode(text, out_type=str)
134
+
135
+ def _convert_token_to_id(self, token):
136
+ """Converts a token (str) into an id using the vocab."""
137
+ return self.sp_model.piece_to_id(token)
138
+
139
+ def _convert_id_to_token(self, index):
140
+ """Converts an index (integer) into a token (str) using the vocab."""
141
+ token = self.sp_model.IdToPiece(index)
142
+ return token
143
+
144
+ def convert_tokens_to_string(self, tokens):
145
+ """Converts a sequence of tokens (strings) into a single string."""
146
+ current_sub_tokens = []
147
+ out_string = ""
148
+ for token in tokens:
149
+ # make sure that special tokens are not decoded using sentencepiece model
150
+ if token in self.all_special_tokens:
151
+ out_string += self.sp_model.decode(current_sub_tokens) + token
152
+ current_sub_tokens = []
153
+ else:
154
+ current_sub_tokens.append(token)
155
+ out_string += self.sp_model.decode(current_sub_tokens)
156
+ return out_string.strip()
157
+
158
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
159
+ if not os.path.isdir(save_directory):
160
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
161
+ return
162
+ out_vocab_file = os.path.join(
163
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
164
+ )
165
+
166
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
167
+ copyfile(self.vocab_file, out_vocab_file)
168
+ elif not os.path.isfile(self.vocab_file):
169
+ with open(out_vocab_file, "wb") as fi:
170
+ content_spiece_model = self.sp_model.serialized_model_proto()
171
+ fi.write(content_spiece_model)
172
+
173
+ return (out_vocab_file,)
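A hedged usage sketch for the tokenizer defined above; it assumes `sentencepiece` is installed and that the checkpoint (the same one used in the decoder docstring earlier) can be downloaded.

```python
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

tokens = tokenizer.tokenize("Hello, my dog is cute")     # SentencePiece sub-word pieces
ids = tokenizer.convert_tokens_to_ids(tokens)

print(tokens)
print(ids)
print(tokenizer.convert_tokens_to_string(tokens))        # round-trips back to (roughly) the input text
```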
venv/lib/python3.10/site-packages/transformers/models/deberta/__init__.py ADDED
@@ -0,0 +1,120 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
28
+ "tokenization_deberta": ["DebertaTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_deberta"] = [
46
+ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "DebertaForMaskedLM",
48
+ "DebertaForQuestionAnswering",
49
+ "DebertaForSequenceClassification",
50
+ "DebertaForTokenClassification",
51
+ "DebertaModel",
52
+ "DebertaPreTrainedModel",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_deberta"] = [
62
+ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFDebertaForMaskedLM",
64
+ "TFDebertaForQuestionAnswering",
65
+ "TFDebertaForSequenceClassification",
66
+ "TFDebertaForTokenClassification",
67
+ "TFDebertaModel",
68
+ "TFDebertaPreTrainedModel",
69
+ ]
70
+
71
+
72
+ if TYPE_CHECKING:
73
+ from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
74
+ from .tokenization_deberta import DebertaTokenizer
75
+
76
+ try:
77
+ if not is_tokenizers_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .tokenization_deberta_fast import DebertaTokenizerFast
83
+
84
+ try:
85
+ if not is_torch_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .modeling_deberta import (
91
+ DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
92
+ DebertaForMaskedLM,
93
+ DebertaForQuestionAnswering,
94
+ DebertaForSequenceClassification,
95
+ DebertaForTokenClassification,
96
+ DebertaModel,
97
+ DebertaPreTrainedModel,
98
+ )
99
+
100
+ try:
101
+ if not is_tf_available():
102
+ raise OptionalDependencyNotAvailable()
103
+ except OptionalDependencyNotAvailable:
104
+ pass
105
+ else:
106
+ from .modeling_tf_deberta import (
107
+ TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
108
+ TFDebertaForMaskedLM,
109
+ TFDebertaForQuestionAnswering,
110
+ TFDebertaForSequenceClassification,
111
+ TFDebertaForTokenClassification,
112
+ TFDebertaModel,
113
+ TFDebertaPreTrainedModel,
114
+ )
115
+
116
+
117
+ else:
118
+ import sys
119
+
120
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
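A hedged sketch of what the `_LazyModule` wiring above provides: attributes of the package are resolved on first access, so importing it does not eagerly pull in the torch- or TF-specific submodules. The exact proxy type and the availability of the optional backends are assumptions.

```python
import importlib

deberta = importlib.import_module("transformers.models.deberta")
print(type(deberta))                 # the lazy proxy installed via sys.modules[__name__] above

config_cls = deberta.DebertaConfig   # first attribute access triggers the real submodule import
print(config_cls.model_type)         # "deberta"
```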
venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.81 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc ADDED
Binary file (7.94 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc ADDED
Binary file (42.7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc ADDED
Binary file (51.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc ADDED
Binary file (15.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc ADDED
Binary file (9.37 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py ADDED
@@ -0,0 +1,193 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DeBERTa model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ if TYPE_CHECKING:
25
+ from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class DebertaConfig(PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
37
+ used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
38
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
39
+ [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Arguments:
45
+ vocab_size (`int`, *optional*, defaults to 30522):
46
+ Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
47
+ `input_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
48
+ hidden_size (`int`, *optional*, defaults to 768):
49
+ Dimensionality of the encoder layers and the pooler layer.
50
+ num_hidden_layers (`int`, *optional*, defaults to 12):
51
+ Number of hidden layers in the Transformer encoder.
52
+ num_attention_heads (`int`, *optional*, defaults to 12):
53
+ Number of attention heads for each attention layer in the Transformer encoder.
54
+ intermediate_size (`int`, *optional*, defaults to 3072):
55
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
56
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
57
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
58
+ `"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
59
+ are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ max_position_embeddings (`int`, *optional*, defaults to 512):
65
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
66
+ just in case (e.g., 512 or 1024 or 2048).
67
+ type_vocab_size (`int`, *optional*, defaults to 2):
68
+ The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-7):
72
+ The epsilon used by the layer normalization layers.
73
+ relative_attention (`bool`, *optional*, defaults to `False`):
74
+ Whether to use relative position encoding.
75
+ max_relative_positions (`int`, *optional*, defaults to 1):
76
+ The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
77
+ as `max_position_embeddings`.
78
+ pad_token_id (`int`, *optional*, defaults to 0):
79
+ The value used to pad input_ids.
80
+ position_biased_input (`bool`, *optional*, defaults to `True`):
81
+ Whether to add absolute position embeddings to the content embeddings.
82
+ pos_att_type (`List[str]`, *optional*):
83
+ The type of relative position attention; it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]` or
84
+ `["p2c", "c2p"]`.
87
+
88
+ Example:
89
+
90
+ ```python
91
+ >>> from transformers import DebertaConfig, DebertaModel
92
+
93
+ >>> # Initializing a DeBERTa microsoft/deberta-base style configuration
94
+ >>> configuration = DebertaConfig()
95
+
96
+ >>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
97
+ >>> model = DebertaModel(configuration)
98
+
99
+ >>> # Accessing the model configuration
100
+ >>> configuration = model.config
101
+ ```"""
102
+
103
+ model_type = "deberta"
104
+
105
+ def __init__(
106
+ self,
107
+ vocab_size=50265,
108
+ hidden_size=768,
109
+ num_hidden_layers=12,
110
+ num_attention_heads=12,
111
+ intermediate_size=3072,
112
+ hidden_act="gelu",
113
+ hidden_dropout_prob=0.1,
114
+ attention_probs_dropout_prob=0.1,
115
+ max_position_embeddings=512,
116
+ type_vocab_size=0,
117
+ initializer_range=0.02,
118
+ layer_norm_eps=1e-7,
119
+ relative_attention=False,
120
+ max_relative_positions=-1,
121
+ pad_token_id=0,
122
+ position_biased_input=True,
123
+ pos_att_type=None,
124
+ pooler_dropout=0,
125
+ pooler_hidden_act="gelu",
126
+ **kwargs,
127
+ ):
128
+ super().__init__(**kwargs)
129
+
130
+ self.hidden_size = hidden_size
131
+ self.num_hidden_layers = num_hidden_layers
132
+ self.num_attention_heads = num_attention_heads
133
+ self.intermediate_size = intermediate_size
134
+ self.hidden_act = hidden_act
135
+ self.hidden_dropout_prob = hidden_dropout_prob
136
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.type_vocab_size = type_vocab_size
139
+ self.initializer_range = initializer_range
140
+ self.relative_attention = relative_attention
141
+ self.max_relative_positions = max_relative_positions
142
+ self.pad_token_id = pad_token_id
143
+ self.position_biased_input = position_biased_input
144
+
145
+ # Backwards compatibility
146
+ if isinstance(pos_att_type, str):
147
+ pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
148
+
149
+ self.pos_att_type = pos_att_type
150
+ self.vocab_size = vocab_size
151
+ self.layer_norm_eps = layer_norm_eps
152
+
153
+ self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
154
+ self.pooler_dropout = pooler_dropout
155
+ self.pooler_hidden_act = pooler_hidden_act
156
+
157
+
158
+ # Copied from transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig
159
+ class DebertaOnnxConfig(OnnxConfig):
160
+ @property
161
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
162
+ if self.task == "multiple-choice":
163
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
164
+ else:
165
+ dynamic_axis = {0: "batch", 1: "sequence"}
166
+ if self._config.type_vocab_size > 0:
167
+ return OrderedDict(
168
+ [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
169
+ )
170
+ else:
171
+ return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
172
+
173
+ @property
174
+ def default_onnx_opset(self) -> int:
175
+ return 12
176
+
177
+ def generate_dummy_inputs(
178
+ self,
179
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
180
+ batch_size: int = -1,
181
+ seq_length: int = -1,
182
+ num_choices: int = -1,
183
+ is_pair: bool = False,
184
+ framework: Optional["TensorType"] = None,
185
+ num_channels: int = 3,
186
+ image_width: int = 40,
187
+ image_height: int = 40,
188
+ tokenizer: "PreTrainedTokenizerBase" = None,
189
+ ) -> Mapping[str, Any]:
190
+ dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
191
+ if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
192
+ del dummy_inputs["token_type_ids"]
193
+ return dummy_inputs
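A hedged sketch exercising the configuration class above: it enables relative attention and shows the backwards-compatibility path that splits a "|"-separated `pos_att_type` string.

```python
from transformers import DebertaConfig

config = DebertaConfig(relative_attention=True, max_relative_positions=-1, pos_att_type="c2p|p2c")

print(config.pos_att_type)     # ['c2p', 'p2c']: the string form is split in __init__
print(config.layer_norm_eps)   # 1e-07, the default set in the signature above
```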
venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py ADDED
@@ -0,0 +1,1426 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the Hugging Face Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DeBERTa model."""
16
+
17
+ from collections.abc import Sequence
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ MaskedLMOutput,
29
+ QuestionAnsweringModelOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import softmax_backward_data
35
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
36
+ from .configuration_deberta import DebertaConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+ _CONFIG_FOR_DOC = "DebertaConfig"
41
+ _CHECKPOINT_FOR_DOC = "microsoft/deberta-base"
42
+
43
+ # Masked LM docstring
44
+ _CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback"
45
+ _MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
46
+ _MASKED_LM_EXPECTED_LOSS = "0.54"
47
+
48
+ # QuestionAnswering docstring
49
+ _CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad"
50
+ _QA_EXPECTED_OUTPUT = "' a nice puppet'"
51
+ _QA_EXPECTED_LOSS = 0.14
52
+ _QA_TARGET_START_INDEX = 12
53
+ _QA_TARGET_END_INDEX = 14
54
+
55
+
56
+ from ..deprecated._archive_maps import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ class ContextPooler(nn.Module):
60
+ def __init__(self, config):
61
+ super().__init__()
62
+ self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
63
+ self.dropout = StableDropout(config.pooler_dropout)
64
+ self.config = config
65
+
66
+ def forward(self, hidden_states):
67
+ # We "pool" the model by simply taking the hidden state corresponding
68
+ # to the first token.
69
+
70
+ context_token = hidden_states[:, 0]
71
+ context_token = self.dropout(context_token)
72
+ pooled_output = self.dense(context_token)
73
+ pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
74
+ return pooled_output
75
+
76
+ @property
77
+ def output_dim(self):
78
+ return self.config.hidden_size
79
+
80
+
81
+ class XSoftmax(torch.autograd.Function):
82
+ """
83
+ Masked Softmax which is optimized for saving memory
84
+
85
+ Args:
86
+ input (`torch.tensor`): The input tensor to which softmax is applied.
87
+ mask (`torch.IntTensor`):
88
+ The mask matrix, where a 0 indicates that the element will be ignored in the softmax calculation.
89
+ dim (int): The dimension along which softmax is applied
90
+
91
+ Example:
92
+
93
+ ```python
94
+ >>> import torch
95
+ >>> from transformers.models.deberta.modeling_deberta import XSoftmax
96
+
97
+ >>> # Make a tensor
98
+ >>> x = torch.randn([4, 20, 100])
99
+
100
+ >>> # Create a mask
101
+ >>> mask = (x > 0).int()
102
+
103
+ >>> # Specify the dimension to apply softmax
104
+ >>> dim = -1
105
+
106
+ >>> y = XSoftmax.apply(x, mask, dim)
107
+ ```"""
108
+
109
+ @staticmethod
110
+ def forward(self, input, mask, dim):
111
+ self.dim = dim
112
+ rmask = ~(mask.to(torch.bool))
113
+
114
+ output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
115
+ output = torch.softmax(output, self.dim)
116
+ output.masked_fill_(rmask, 0)
117
+ self.save_for_backward(output)
118
+ return output
119
+
120
+ @staticmethod
121
+ def backward(self, grad_output):
122
+ (output,) = self.saved_tensors
123
+ inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
124
+ return inputGrad, None, None
125
+
126
+ @staticmethod
127
+ def symbolic(g, self, mask, dim):
128
+ import torch.onnx.symbolic_helper as sym_help
129
+ from torch.onnx.symbolic_opset9 import masked_fill, softmax
130
+
131
+ mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
132
+ r_mask = g.op(
133
+ "Cast",
134
+ g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
135
+ to_i=sym_help.cast_pytorch_to_onnx["Bool"],
136
+ )
137
+ output = masked_fill(
138
+ g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
139
+ )
140
+ output = softmax(g, output, dim)
141
+ return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
142
+
143
+
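A hedged sketch of the forward math in `XSoftmax`, written with plain torch ops; the import path is the one shown in the docstring above, and the tensor shapes are arbitrary.

```python
import torch
from transformers.models.deberta.modeling_deberta import XSoftmax

x = torch.randn(2, 4, 6)
mask = torch.randint(0, 2, (2, 4, 6))        # 0 marks positions to ignore

rmask = ~mask.to(torch.bool)
reference = x.masked_fill(rmask, torch.finfo(x.dtype).min).softmax(-1).masked_fill(rmask, 0.0)

# Same forward values; XSoftmax additionally defines a memory-saving backward and an ONNX symbolic.
print(torch.allclose(reference, XSoftmax.apply(x, mask, -1)))   # True
```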
144
+ class DropoutContext(object):
145
+ def __init__(self):
146
+ self.dropout = 0
147
+ self.mask = None
148
+ self.scale = 1
149
+ self.reuse_mask = True
150
+
151
+
152
+ def get_mask(input, local_context):
153
+ if not isinstance(local_context, DropoutContext):
154
+ dropout = local_context
155
+ mask = None
156
+ else:
157
+ dropout = local_context.dropout
158
+ dropout *= local_context.scale
159
+ mask = local_context.mask if local_context.reuse_mask else None
160
+
161
+ if dropout > 0 and mask is None:
162
+ mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
163
+
164
+ if isinstance(local_context, DropoutContext):
165
+ if local_context.mask is None:
166
+ local_context.mask = mask
167
+
168
+ return mask, dropout
169
+
170
+
171
+ class XDropout(torch.autograd.Function):
172
+ """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
173
+
174
+ @staticmethod
175
+ def forward(ctx, input, local_ctx):
176
+ mask, dropout = get_mask(input, local_ctx)
177
+ ctx.scale = 1.0 / (1 - dropout)
178
+ if dropout > 0:
179
+ ctx.save_for_backward(mask)
180
+ return input.masked_fill(mask, 0) * ctx.scale
181
+ else:
182
+ return input
183
+
184
+ @staticmethod
185
+ def backward(ctx, grad_output):
186
+ if ctx.scale > 1:
187
+ (mask,) = ctx.saved_tensors
188
+ return grad_output.masked_fill(mask, 0) * ctx.scale, None
189
+ else:
190
+ return grad_output, None
191
+
192
+ @staticmethod
193
+ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
194
+ from torch.onnx import symbolic_opset12
195
+
196
+ dropout_p = local_ctx
197
+ if isinstance(local_ctx, DropoutContext):
198
+ dropout_p = local_ctx.dropout
199
+ # StableDropout only calls this function when training.
200
+ train = True
201
+ # TODO: We should check if the opset_version being used to export
202
+ # is > 12 here, but there's no good way to do that. As-is, if the
203
+ # opset_version < 12, export will fail with a CheckerError.
204
+ # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
205
+ # if opset_version < 12:
206
+ # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
207
+ return symbolic_opset12.dropout(g, input, dropout_p, train)
208
+
209
+
210
+ class StableDropout(nn.Module):
211
+ """
212
+ Optimized dropout module for stabilizing training
213
+
214
+ Args:
215
+ drop_prob (float): the dropout probability
216
+ """
217
+
218
+ def __init__(self, drop_prob):
219
+ super().__init__()
220
+ self.drop_prob = drop_prob
221
+ self.count = 0
222
+ self.context_stack = None
223
+
224
+ def forward(self, x):
225
+ """
226
+ Call the module
227
+
228
+ Args:
229
+ x (`torch.tensor`): The input tensor to which dropout is applied
230
+ """
231
+ if self.training and self.drop_prob > 0:
232
+ return XDropout.apply(x, self.get_context())
233
+ return x
234
+
235
+ def clear_context(self):
236
+ self.count = 0
237
+ self.context_stack = None
238
+
239
+ def init_context(self, reuse_mask=True, scale=1):
240
+ if self.context_stack is None:
241
+ self.context_stack = []
242
+ self.count = 0
243
+ for c in self.context_stack:
244
+ c.reuse_mask = reuse_mask
245
+ c.scale = scale
246
+
247
+ def get_context(self):
248
+ if self.context_stack is not None:
249
+ if self.count >= len(self.context_stack):
250
+ self.context_stack.append(DropoutContext())
251
+ ctx = self.context_stack[self.count]
252
+ ctx.dropout = self.drop_prob
253
+ self.count += 1
254
+ return ctx
255
+ else:
256
+ return self.drop_prob
257
+
258
+
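A hedged sketch of what `get_mask` plus `XDropout.forward` compute: a Bernoulli drop-mask, zeroing via `masked_fill`, and 1/(1-p) rescaling, i.e. standard inverted dropout with a mask that can be reused in backward. The probability and shape are toy values.

```python
import torch

p = 0.1
x = torch.randn(3, 5)

drop_mask = (1 - torch.empty_like(x).bernoulli_(1 - p)).to(torch.bool)   # True where an element is dropped
y = x.masked_fill(drop_mask, 0) * (1.0 / (1 - p))                        # zero and rescale the survivors

print(drop_mask.float().mean())   # roughly p of the entries are dropped (up to sampling noise)
```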
259
+ class DebertaLayerNorm(nn.Module):
260
+ """LayerNorm module in the TF style (epsilon inside the square root)."""
261
+
262
+ def __init__(self, size, eps=1e-12):
263
+ super().__init__()
264
+ self.weight = nn.Parameter(torch.ones(size))
265
+ self.bias = nn.Parameter(torch.zeros(size))
266
+ self.variance_epsilon = eps
267
+
268
+ def forward(self, hidden_states):
269
+ input_type = hidden_states.dtype
270
+ hidden_states = hidden_states.float()
271
+ mean = hidden_states.mean(-1, keepdim=True)
272
+ variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
273
+ hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
274
+ hidden_states = hidden_states.to(input_type)
275
+ y = self.weight * hidden_states + self.bias
276
+ return y
277
+
278
+
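A hedged check that `DebertaLayerNorm` (normalisation carried out in float32, epsilon inside the square root) matches `torch.nn.LayerNorm` for float32 inputs when given the same epsilon; the import path is an assumption mirroring the XSoftmax docstring above.

```python
import torch
from torch import nn
from transformers.models.deberta.modeling_deberta import DebertaLayerNorm

h = torch.randn(2, 4, 8)
deberta_ln = DebertaLayerNorm(8, eps=1e-7)
torch_ln = nn.LayerNorm(8, eps=1e-7)

print(torch.allclose(deberta_ln(h), torch_ln(h), atol=1e-5))   # True for fp32 inputs
```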
279
+ class DebertaSelfOutput(nn.Module):
280
+ def __init__(self, config):
281
+ super().__init__()
282
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
283
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
284
+ self.dropout = StableDropout(config.hidden_dropout_prob)
285
+
286
+ def forward(self, hidden_states, input_tensor):
287
+ hidden_states = self.dense(hidden_states)
288
+ hidden_states = self.dropout(hidden_states)
289
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
290
+ return hidden_states
291
+
292
+
293
+ class DebertaAttention(nn.Module):
294
+ def __init__(self, config):
295
+ super().__init__()
296
+ self.self = DisentangledSelfAttention(config)
297
+ self.output = DebertaSelfOutput(config)
298
+ self.config = config
299
+
300
+ def forward(
301
+ self,
302
+ hidden_states,
303
+ attention_mask,
304
+ output_attentions=False,
305
+ query_states=None,
306
+ relative_pos=None,
307
+ rel_embeddings=None,
308
+ ):
309
+ self_output = self.self(
310
+ hidden_states,
311
+ attention_mask,
312
+ output_attentions,
313
+ query_states=query_states,
314
+ relative_pos=relative_pos,
315
+ rel_embeddings=rel_embeddings,
316
+ )
317
+ if output_attentions:
318
+ self_output, att_matrix = self_output
319
+ if query_states is None:
320
+ query_states = hidden_states
321
+ attention_output = self.output(self_output, query_states)
322
+
323
+ if output_attentions:
324
+ return (attention_output, att_matrix)
325
+ else:
326
+ return attention_output
327
+
328
+
329
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta
330
+ class DebertaIntermediate(nn.Module):
331
+ def __init__(self, config):
332
+ super().__init__()
333
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
334
+ if isinstance(config.hidden_act, str):
335
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
336
+ else:
337
+ self.intermediate_act_fn = config.hidden_act
338
+
339
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
340
+ hidden_states = self.dense(hidden_states)
341
+ hidden_states = self.intermediate_act_fn(hidden_states)
342
+ return hidden_states
343
+
344
+
345
+ class DebertaOutput(nn.Module):
346
+ def __init__(self, config):
347
+ super().__init__()
348
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
349
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
350
+ self.dropout = StableDropout(config.hidden_dropout_prob)
351
+ self.config = config
352
+
353
+ def forward(self, hidden_states, input_tensor):
354
+ hidden_states = self.dense(hidden_states)
355
+ hidden_states = self.dropout(hidden_states)
356
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
357
+ return hidden_states
358
+
359
+
360
+ class DebertaLayer(nn.Module):
361
+ def __init__(self, config):
362
+ super().__init__()
363
+ self.attention = DebertaAttention(config)
364
+ self.intermediate = DebertaIntermediate(config)
365
+ self.output = DebertaOutput(config)
366
+
367
+ def forward(
368
+ self,
369
+ hidden_states,
370
+ attention_mask,
371
+ query_states=None,
372
+ relative_pos=None,
373
+ rel_embeddings=None,
374
+ output_attentions=False,
375
+ ):
376
+ attention_output = self.attention(
377
+ hidden_states,
378
+ attention_mask,
379
+ output_attentions=output_attentions,
380
+ query_states=query_states,
381
+ relative_pos=relative_pos,
382
+ rel_embeddings=rel_embeddings,
383
+ )
384
+ if output_attentions:
385
+ attention_output, att_matrix = attention_output
386
+ intermediate_output = self.intermediate(attention_output)
387
+ layer_output = self.output(intermediate_output, attention_output)
388
+ if output_attentions:
389
+ return (layer_output, att_matrix)
390
+ else:
391
+ return layer_output
392
+
393
+
394
+ class DebertaEncoder(nn.Module):
395
+ """Modified BertEncoder with relative position bias support"""
396
+
397
+ def __init__(self, config):
398
+ super().__init__()
399
+ self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
400
+ self.relative_attention = getattr(config, "relative_attention", False)
401
+ if self.relative_attention:
402
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
403
+ if self.max_relative_positions < 1:
404
+ self.max_relative_positions = config.max_position_embeddings
405
+ self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
406
+ self.gradient_checkpointing = False
407
+
408
+ def get_rel_embedding(self):
409
+ rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
410
+ return rel_embeddings
411
+
412
+ def get_attention_mask(self, attention_mask):
413
+ if attention_mask.dim() <= 2:
414
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
415
+ attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
416
+ elif attention_mask.dim() == 3:
417
+ attention_mask = attention_mask.unsqueeze(1)
418
+
419
+ return attention_mask
420
+
421
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
422
+ if self.relative_attention and relative_pos is None:
423
+ q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
424
+ relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
425
+ return relative_pos
426
+
427
+ def forward(
428
+ self,
429
+ hidden_states,
430
+ attention_mask,
431
+ output_hidden_states=True,
432
+ output_attentions=False,
433
+ query_states=None,
434
+ relative_pos=None,
435
+ return_dict=True,
436
+ ):
437
+ attention_mask = self.get_attention_mask(attention_mask)
438
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
439
+
440
+ all_hidden_states = () if output_hidden_states else None
441
+ all_attentions = () if output_attentions else None
442
+
443
+ if isinstance(hidden_states, Sequence):
444
+ next_kv = hidden_states[0]
445
+ else:
446
+ next_kv = hidden_states
447
+ rel_embeddings = self.get_rel_embedding()
448
+ for i, layer_module in enumerate(self.layer):
449
+ if output_hidden_states:
450
+ all_hidden_states = all_hidden_states + (hidden_states,)
451
+
452
+ if self.gradient_checkpointing and self.training:
453
+ hidden_states = self._gradient_checkpointing_func(
454
+ layer_module.__call__,
455
+ next_kv,
456
+ attention_mask,
457
+ query_states,
458
+ relative_pos,
459
+ rel_embeddings,
460
+ output_attentions,
461
+ )
462
+ else:
463
+ hidden_states = layer_module(
464
+ next_kv,
465
+ attention_mask,
466
+ query_states=query_states,
467
+ relative_pos=relative_pos,
468
+ rel_embeddings=rel_embeddings,
469
+ output_attentions=output_attentions,
470
+ )
471
+
472
+ if output_attentions:
473
+ hidden_states, att_m = hidden_states
474
+
475
+ if query_states is not None:
476
+ query_states = hidden_states
477
+ if isinstance(hidden_states, Sequence):
478
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
479
+ else:
480
+ next_kv = hidden_states
481
+
482
+ if output_attentions:
483
+ all_attentions = all_attentions + (att_m,)
484
+
485
+ if output_hidden_states:
486
+ all_hidden_states = all_hidden_states + (hidden_states,)
487
+
488
+ if not return_dict:
489
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
490
+ return BaseModelOutput(
491
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
492
+ )
493
+
494
+
495
+ def build_relative_position(query_size, key_size, device):
496
+ """
497
+ Build relative position according to the query and key
498
+
499
+ We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
500
+ \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
501
+ P_k\\)
502
+
503
+ Args:
504
+ query_size (int): the length of query
505
+ key_size (int): the length of key
506
+
507
+ Return:
508
+ `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
509
+
510
+ """
511
+
512
+ q_ids = torch.arange(query_size, dtype=torch.long, device=device)
513
+ k_ids = torch.arange(key_size, dtype=torch.long, device=device)
514
+ rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
515
+ rel_pos_ids = rel_pos_ids[:query_size, :]
516
+ rel_pos_ids = rel_pos_ids.unsqueeze(0)
517
+ return rel_pos_ids
518
+
519
+
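A hedged sketch making the R_{q->k} = P_q - P_k convention above concrete for a tiny query/key length; the import path is an assumption mirroring the XSoftmax docstring.

```python
import torch
from transformers.models.deberta.modeling_deberta import build_relative_position

rel = build_relative_position(3, 4, torch.device("cpu"))
print(rel.shape)    # torch.Size([1, 3, 4])
print(rel[0])
# tensor([[ 0, -1, -2, -3],
#         [ 1,  0, -1, -2],
#         [ 2,  1,  0, -1]])
```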
520
+ @torch.jit.script
521
+ def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
522
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
523
+
524
+
525
+ @torch.jit.script
526
+ def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
527
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
528
+
529
+
530
+ @torch.jit.script
531
+ def pos_dynamic_expand(pos_index, p2c_att, key_layer):
532
+ return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
533
+
534
+
535
+ class DisentangledSelfAttention(nn.Module):
536
+ """
537
+ Disentangled self-attention module
538
+
539
+ Parameters:
540
+ config ([`DebertaConfig`]):
541
+ A model config class instance with the configuration used to build a new model. The schema is similar to
542
+ *BertConfig*; for more details, please refer to [`DebertaConfig`].
543
+
544
+ """
545
+
546
+ def __init__(self, config):
547
+ super().__init__()
548
+ if config.hidden_size % config.num_attention_heads != 0:
549
+ raise ValueError(
550
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
551
+ f"heads ({config.num_attention_heads})"
552
+ )
553
+ self.num_attention_heads = config.num_attention_heads
554
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
555
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
556
+ self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
557
+ self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
558
+ self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
559
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
560
+
561
+ self.relative_attention = getattr(config, "relative_attention", False)
562
+ self.talking_head = getattr(config, "talking_head", False)
563
+
564
+ if self.talking_head:
565
+ self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
566
+ self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
567
+
568
+ if self.relative_attention:
569
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
570
+ if self.max_relative_positions < 1:
571
+ self.max_relative_positions = config.max_position_embeddings
572
+ self.pos_dropout = StableDropout(config.hidden_dropout_prob)
573
+
574
+ if "c2p" in self.pos_att_type:
575
+ self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
576
+ if "p2c" in self.pos_att_type:
577
+ self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
578
+
579
+ self.dropout = StableDropout(config.attention_probs_dropout_prob)
580
+
581
+ def transpose_for_scores(self, x):
582
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
583
+ x = x.view(new_x_shape)
584
+ return x.permute(0, 2, 1, 3)
585
+
586
+ def forward(
587
+ self,
588
+ hidden_states,
589
+ attention_mask,
590
+ output_attentions=False,
591
+ query_states=None,
592
+ relative_pos=None,
593
+ rel_embeddings=None,
594
+ ):
595
+ """
596
+ Call the module
597
+
598
+ Args:
599
+ hidden_states (`torch.FloatTensor`):
600
+ Input states to the module, usually the output of the previous layer; these serve as the Q, K and V in
601
+ *Attention(Q,K,V)*
602
+
603
+ attention_mask (`torch.BoolTensor`):
604
+ An attention mask matrix of shape [*B*, *N*, *N*], where *B* is the batch size and *N* is the maximum
605
+ sequence length, in which element [i,j] = *1* means that the *i*-th token in the input can attend to the *j*-th
606
+ token.
607
+
608
+ output_attentions (`bool`, optional):
609
+ Whether to return the attention matrix.
610
+
611
+ query_states (`torch.FloatTensor`, optional):
612
+ The *Q* state in *Attention(Q,K,V)*.
613
+
614
+ relative_pos (`torch.LongTensor`):
615
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
616
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
617
+
618
+ rel_embeddings (`torch.FloatTensor`):
619
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
620
+ \\text{max_relative_positions}\\), *hidden_size*].
621
+
622
+
623
+ """
624
+ if query_states is None:
625
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
626
+ query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
627
+ else:
628
+
629
+ def linear(w, b, x):
630
+ if b is not None:
631
+ return torch.matmul(x, w.t()) + b.t()
632
+ else:
633
+ return torch.matmul(x, w.t()) # + b.t()
634
+
635
+ ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
636
+ qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
637
+ qkvb = [None] * 3
638
+
639
+ q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype))
640
+ k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)]
641
+ query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]
642
+
643
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
644
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
645
+
646
+ rel_att = None
647
+ # Take the dot product between "query" and "key" to get the raw attention scores.
648
+ scale_factor = 1 + len(self.pos_att_type)
649
+ scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
650
+ query_layer = query_layer / scale.to(dtype=query_layer.dtype)
651
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
652
+ if self.relative_attention:
653
+ rel_embeddings = self.pos_dropout(rel_embeddings)
654
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
655
+
656
+ if rel_att is not None:
657
+ attention_scores = attention_scores + rel_att
658
+
659
+ # bxhxlxd
660
+ if self.talking_head:
661
+ attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
662
+
663
+ attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
664
+ attention_probs = self.dropout(attention_probs)
665
+ if self.talking_head:
666
+ attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
667
+
668
+ context_layer = torch.matmul(attention_probs, value_layer)
669
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
670
+ new_context_layer_shape = context_layer.size()[:-2] + (-1,)
671
+ context_layer = context_layer.view(new_context_layer_shape)
672
+ if output_attentions:
673
+ return (context_layer, attention_probs)
674
+ else:
675
+ return context_layer
676
+
677
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
678
+ if relative_pos is None:
679
+ q = query_layer.size(-2)
680
+ relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
681
+ if relative_pos.dim() == 2:
682
+ relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
683
+ elif relative_pos.dim() == 3:
684
+ relative_pos = relative_pos.unsqueeze(1)
685
+ # bxhxqxk
686
+ elif relative_pos.dim() != 4:
687
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
688
+
689
+ att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
690
+ relative_pos = relative_pos.long().to(query_layer.device)
691
+ rel_embeddings = rel_embeddings[
692
+ self.max_relative_positions - att_span : self.max_relative_positions + att_span, :
693
+ ].unsqueeze(0)
694
+
695
+ score = 0
696
+
697
+ # content->position
698
+ if "c2p" in self.pos_att_type:
699
+ pos_key_layer = self.pos_proj(rel_embeddings)
700
+ pos_key_layer = self.transpose_for_scores(pos_key_layer)
701
+ c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
702
+ c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
703
+ c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
704
+ score += c2p_att
705
+
706
+ # position->content
707
+ if "p2c" in self.pos_att_type:
708
+ pos_query_layer = self.pos_q_proj(rel_embeddings)
709
+ pos_query_layer = self.transpose_for_scores(pos_query_layer)
710
+ pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
711
+ if query_layer.size(-2) != key_layer.size(-2):
712
+ r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
713
+ else:
714
+ r_pos = relative_pos
715
+ p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
716
+ p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
717
+ p2c_att = torch.gather(
718
+ p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)
719
+ ).transpose(-1, -2)
720
+
721
+ if query_layer.size(-2) != key_layer.size(-2):
722
+ pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
723
+ p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
724
+ score += p2c_att
725
+
726
+ return score
727
+
728
+
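A minimal sketch (not part of this module) of the index arithmetic used by the c2p branch above: relative distances are shifted by `att_span` and clamped into `[0, 2*att_span - 1]` so they can be used to gather rows from the sliced relative-embedding table. The toy sizes below are illustrative assumptions.

import torch

q_len, k_len, att_span = 4, 4, 2  # hypothetical toy sizes
rel_pos = torch.arange(q_len)[:, None] - torch.arange(k_len)[None, :]  # distance i - j, shape (q_len, k_len)
c2p_index = torch.clamp(rel_pos + att_span, 0, att_span * 2 - 1)       # indices into the 2*att_span embedding rows
print(rel_pos)    # values in [-(k_len - 1), q_len - 1]
print(c2p_index)  # values in [0, 2*att_span - 1]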
729
+ class DebertaEmbeddings(nn.Module):
730
+ """Construct the embeddings from word, position and token_type embeddings."""
731
+
732
+ def __init__(self, config):
733
+ super().__init__()
734
+ pad_token_id = getattr(config, "pad_token_id", 0)
735
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
736
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
737
+
738
+ self.position_biased_input = getattr(config, "position_biased_input", True)
739
+ if not self.position_biased_input:
740
+ self.position_embeddings = None
741
+ else:
742
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
743
+
744
+ if config.type_vocab_size > 0:
745
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
746
+
747
+ if self.embedding_size != config.hidden_size:
748
+ self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
749
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
750
+ self.dropout = StableDropout(config.hidden_dropout_prob)
751
+ self.config = config
752
+
753
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
754
+ self.register_buffer(
755
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
756
+ )
757
+
758
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
759
+ if input_ids is not None:
760
+ input_shape = input_ids.size()
761
+ else:
762
+ input_shape = inputs_embeds.size()[:-1]
763
+
764
+ seq_length = input_shape[1]
765
+
766
+ if position_ids is None:
767
+ position_ids = self.position_ids[:, :seq_length]
768
+
769
+ if token_type_ids is None:
770
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
771
+
772
+ if inputs_embeds is None:
773
+ inputs_embeds = self.word_embeddings(input_ids)
774
+
775
+ if self.position_embeddings is not None:
776
+ position_embeddings = self.position_embeddings(position_ids.long())
777
+ else:
778
+ position_embeddings = torch.zeros_like(inputs_embeds)
779
+
780
+ embeddings = inputs_embeds
781
+ if self.position_biased_input:
782
+ embeddings += position_embeddings
783
+ if self.config.type_vocab_size > 0:
784
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
785
+ embeddings += token_type_embeddings
786
+
787
+ if self.embedding_size != self.config.hidden_size:
788
+ embeddings = self.embed_proj(embeddings)
789
+
790
+ embeddings = self.LayerNorm(embeddings)
791
+
792
+ if mask is not None:
793
+ if mask.dim() != embeddings.dim():
794
+ if mask.dim() == 4:
795
+ mask = mask.squeeze(1).squeeze(1)
796
+ mask = mask.unsqueeze(2)
797
+ mask = mask.to(embeddings.dtype)
798
+
799
+ embeddings = embeddings * mask
800
+
801
+ embeddings = self.dropout(embeddings)
802
+ return embeddings
803
+
804
+
805
+ class DebertaPreTrainedModel(PreTrainedModel):
806
+ """
807
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
808
+ models.
809
+ """
810
+
811
+ config_class = DebertaConfig
812
+ base_model_prefix = "deberta"
813
+ _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
814
+ supports_gradient_checkpointing = True
815
+
816
+ def _init_weights(self, module):
817
+ """Initialize the weights."""
818
+ if isinstance(module, nn.Linear):
819
+ # Slightly different from the TF version which uses truncated_normal for initialization
820
+ # cf https://github.com/pytorch/pytorch/pull/5617
821
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
822
+ if module.bias is not None:
823
+ module.bias.data.zero_()
824
+ elif isinstance(module, nn.Embedding):
825
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
826
+ if module.padding_idx is not None:
827
+ module.weight.data[module.padding_idx].zero_()
828
+
829
+
830
+ DEBERTA_START_DOCSTRING = r"""
831
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
832
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
833
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
834
+ improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
835
+
836
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
837
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
838
+ and behavior.
839
+
840
+
841
+ Parameters:
842
+ config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
843
+ Initializing with a config file does not load the weights associated with the model, only the
844
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
845
+ """
846
+
847
+ DEBERTA_INPUTS_DOCSTRING = r"""
848
+ Args:
849
+ input_ids (`torch.LongTensor` of shape `({0})`):
850
+ Indices of input sequence tokens in the vocabulary.
851
+
852
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
853
+ [`PreTrainedTokenizer.__call__`] for details.
854
+
855
+ [What are input IDs?](../glossary#input-ids)
856
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
857
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
858
+
859
+ - 1 for tokens that are **not masked**,
860
+ - 0 for tokens that are **masked**.
861
+
862
+ [What are attention masks?](../glossary#attention-mask)
863
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
864
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
865
+ 1]`:
866
+
867
+ - 0 corresponds to a *sentence A* token,
868
+ - 1 corresponds to a *sentence B* token.
869
+
870
+ [What are token type IDs?](../glossary#token-type-ids)
871
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
872
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
873
+ config.max_position_embeddings - 1]`.
874
+
875
+ [What are position IDs?](../glossary#position-ids)
876
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
877
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
878
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
879
+ model's internal embedding lookup matrix.
880
+ output_attentions (`bool`, *optional*):
881
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
882
+ tensors for more detail.
883
+ output_hidden_states (`bool`, *optional*):
884
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
885
+ more detail.
886
+ return_dict (`bool`, *optional*):
887
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
888
+ """
889
+
890
+
891
+ @add_start_docstrings(
892
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
893
+ DEBERTA_START_DOCSTRING,
894
+ )
895
+ class DebertaModel(DebertaPreTrainedModel):
896
+ def __init__(self, config):
897
+ super().__init__(config)
898
+
899
+ self.embeddings = DebertaEmbeddings(config)
900
+ self.encoder = DebertaEncoder(config)
901
+ self.z_steps = 0
902
+ self.config = config
903
+ # Initialize weights and apply final processing
904
+ self.post_init()
905
+
906
+ def get_input_embeddings(self):
907
+ return self.embeddings.word_embeddings
908
+
909
+ def set_input_embeddings(self, new_embeddings):
910
+ self.embeddings.word_embeddings = new_embeddings
911
+
912
+ def _prune_heads(self, heads_to_prune):
913
+ """
914
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
915
+ class PreTrainedModel
916
+ """
917
+ raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
918
+
919
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
920
+ @add_code_sample_docstrings(
921
+ checkpoint=_CHECKPOINT_FOR_DOC,
922
+ output_type=BaseModelOutput,
923
+ config_class=_CONFIG_FOR_DOC,
924
+ )
925
+ def forward(
926
+ self,
927
+ input_ids: Optional[torch.Tensor] = None,
928
+ attention_mask: Optional[torch.Tensor] = None,
929
+ token_type_ids: Optional[torch.Tensor] = None,
930
+ position_ids: Optional[torch.Tensor] = None,
931
+ inputs_embeds: Optional[torch.Tensor] = None,
932
+ output_attentions: Optional[bool] = None,
933
+ output_hidden_states: Optional[bool] = None,
934
+ return_dict: Optional[bool] = None,
935
+ ) -> Union[Tuple, BaseModelOutput]:
936
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
937
+ output_hidden_states = (
938
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
939
+ )
940
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
941
+
942
+ if input_ids is not None and inputs_embeds is not None:
943
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
944
+ elif input_ids is not None:
945
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
946
+ input_shape = input_ids.size()
947
+ elif inputs_embeds is not None:
948
+ input_shape = inputs_embeds.size()[:-1]
949
+ else:
950
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
951
+
952
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
953
+
954
+ if attention_mask is None:
955
+ attention_mask = torch.ones(input_shape, device=device)
956
+ if token_type_ids is None:
957
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
958
+
959
+ embedding_output = self.embeddings(
960
+ input_ids=input_ids,
961
+ token_type_ids=token_type_ids,
962
+ position_ids=position_ids,
963
+ mask=attention_mask,
964
+ inputs_embeds=inputs_embeds,
965
+ )
966
+
967
+ encoder_outputs = self.encoder(
968
+ embedding_output,
969
+ attention_mask,
970
+ output_hidden_states=True,
971
+ output_attentions=output_attentions,
972
+ return_dict=return_dict,
973
+ )
974
+ encoded_layers = encoder_outputs[1]
975
+
976
+ if self.z_steps > 1:
977
+ hidden_states = encoded_layers[-2]
978
+ layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
979
+ query_states = encoded_layers[-1]
980
+ rel_embeddings = self.encoder.get_rel_embedding()
981
+ attention_mask = self.encoder.get_attention_mask(attention_mask)
982
+ rel_pos = self.encoder.get_rel_pos(embedding_output)
983
+ for layer in layers[1:]:
984
+ query_states = layer(
985
+ hidden_states,
986
+ attention_mask,
987
+ output_attentions=False,
988
+ query_states=query_states,
989
+ relative_pos=rel_pos,
990
+ rel_embeddings=rel_embeddings,
991
+ )
992
+ encoded_layers.append(query_states)
993
+
994
+ sequence_output = encoded_layers[-1]
995
+
996
+ if not return_dict:
997
+ return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
998
+
999
+ return BaseModelOutput(
1000
+ last_hidden_state=sequence_output,
1001
+ hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
1002
+ attentions=encoder_outputs.attentions,
1003
+ )
1004
+
1005
+
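For orientation, a minimal usage sketch of the bare model defined above (illustrative only and not part of this file; it assumes the public `microsoft/deberta-base` checkpoint is available):

from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)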
1006
+ @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
1007
+ class DebertaForMaskedLM(DebertaPreTrainedModel):
1008
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
1009
+
1010
+ def __init__(self, config):
1011
+ super().__init__(config)
1012
+
1013
+ self.deberta = DebertaModel(config)
1014
+ self.cls = DebertaOnlyMLMHead(config)
1015
+
1016
+ # Initialize weights and apply final processing
1017
+ self.post_init()
1018
+
1019
+ def get_output_embeddings(self):
1020
+ return self.cls.predictions.decoder
1021
+
1022
+ def set_output_embeddings(self, new_embeddings):
1023
+ self.cls.predictions.decoder = new_embeddings
1024
+
1025
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1026
+ @add_code_sample_docstrings(
1027
+ checkpoint=_CHECKPOINT_FOR_MASKED_LM,
1028
+ output_type=MaskedLMOutput,
1029
+ config_class=_CONFIG_FOR_DOC,
1030
+ mask="[MASK]",
1031
+ expected_output=_MASKED_LM_EXPECTED_OUTPUT,
1032
+ expected_loss=_MASKED_LM_EXPECTED_LOSS,
1033
+ )
1034
+ def forward(
1035
+ self,
1036
+ input_ids: Optional[torch.Tensor] = None,
1037
+ attention_mask: Optional[torch.Tensor] = None,
1038
+ token_type_ids: Optional[torch.Tensor] = None,
1039
+ position_ids: Optional[torch.Tensor] = None,
1040
+ inputs_embeds: Optional[torch.Tensor] = None,
1041
+ labels: Optional[torch.Tensor] = None,
1042
+ output_attentions: Optional[bool] = None,
1043
+ output_hidden_states: Optional[bool] = None,
1044
+ return_dict: Optional[bool] = None,
1045
+ ) -> Union[Tuple, MaskedLMOutput]:
1046
+ r"""
1047
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1048
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1049
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1050
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1051
+ """
1052
+
1053
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1054
+
1055
+ outputs = self.deberta(
1056
+ input_ids,
1057
+ attention_mask=attention_mask,
1058
+ token_type_ids=token_type_ids,
1059
+ position_ids=position_ids,
1060
+ inputs_embeds=inputs_embeds,
1061
+ output_attentions=output_attentions,
1062
+ output_hidden_states=output_hidden_states,
1063
+ return_dict=return_dict,
1064
+ )
1065
+
1066
+ sequence_output = outputs[0]
1067
+ prediction_scores = self.cls(sequence_output)
1068
+
1069
+ masked_lm_loss = None
1070
+ if labels is not None:
1071
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1072
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1073
+
1074
+ if not return_dict:
1075
+ output = (prediction_scores,) + outputs[1:]
1076
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1077
+
1078
+ return MaskedLMOutput(
1079
+ loss=masked_lm_loss,
1080
+ logits=prediction_scores,
1081
+ hidden_states=outputs.hidden_states,
1082
+ attentions=outputs.attentions,
1083
+ )
1084
+
1085
+
1086
+ class DebertaPredictionHeadTransform(nn.Module):
1087
+ def __init__(self, config):
1088
+ super().__init__()
1089
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
1090
+
1091
+ self.dense = nn.Linear(config.hidden_size, self.embedding_size)
1092
+ if isinstance(config.hidden_act, str):
1093
+ self.transform_act_fn = ACT2FN[config.hidden_act]
1094
+ else:
1095
+ self.transform_act_fn = config.hidden_act
1096
+ self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)
1097
+
1098
+ def forward(self, hidden_states):
1099
+ hidden_states = self.dense(hidden_states)
1100
+ hidden_states = self.transform_act_fn(hidden_states)
1101
+ hidden_states = self.LayerNorm(hidden_states)
1102
+ return hidden_states
1103
+
1104
+
1105
+ class DebertaLMPredictionHead(nn.Module):
1106
+ def __init__(self, config):
1107
+ super().__init__()
1108
+ self.transform = DebertaPredictionHeadTransform(config)
1109
+
1110
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
1111
+ # The output weights are the same as the input embeddings, but there is
1112
+ # an output-only bias for each token.
1113
+ self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
1114
+
1115
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1116
+
1117
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
1118
+ self.decoder.bias = self.bias
1119
+
1120
+ def forward(self, hidden_states):
1121
+ hidden_states = self.transform(hidden_states)
1122
+ hidden_states = self.decoder(hidden_states)
1123
+ return hidden_states
1124
+
1125
+
1126
+ # copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
1127
+ class DebertaOnlyMLMHead(nn.Module):
1128
+ def __init__(self, config):
1129
+ super().__init__()
1130
+ self.predictions = DebertaLMPredictionHead(config)
1131
+
1132
+ def forward(self, sequence_output):
1133
+ prediction_scores = self.predictions(sequence_output)
1134
+ return prediction_scores
1135
+
1136
+
1137
+ @add_start_docstrings(
1138
+ """
1139
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1140
+ pooled output) e.g. for GLUE tasks.
1141
+ """,
1142
+ DEBERTA_START_DOCSTRING,
1143
+ )
1144
+ class DebertaForSequenceClassification(DebertaPreTrainedModel):
1145
+ def __init__(self, config):
1146
+ super().__init__(config)
1147
+
1148
+ num_labels = getattr(config, "num_labels", 2)
1149
+ self.num_labels = num_labels
1150
+
1151
+ self.deberta = DebertaModel(config)
1152
+ self.pooler = ContextPooler(config)
1153
+ output_dim = self.pooler.output_dim
1154
+
1155
+ self.classifier = nn.Linear(output_dim, num_labels)
1156
+ drop_out = getattr(config, "cls_dropout", None)
1157
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
1158
+ self.dropout = StableDropout(drop_out)
1159
+
1160
+ # Initialize weights and apply final processing
1161
+ self.post_init()
1162
+
1163
+ def get_input_embeddings(self):
1164
+ return self.deberta.get_input_embeddings()
1165
+
1166
+ def set_input_embeddings(self, new_embeddings):
1167
+ self.deberta.set_input_embeddings(new_embeddings)
1168
+
1169
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1170
+ @add_code_sample_docstrings(
1171
+ checkpoint=_CHECKPOINT_FOR_DOC,
1172
+ output_type=SequenceClassifierOutput,
1173
+ config_class=_CONFIG_FOR_DOC,
1174
+ )
1175
+ def forward(
1176
+ self,
1177
+ input_ids: Optional[torch.Tensor] = None,
1178
+ attention_mask: Optional[torch.Tensor] = None,
1179
+ token_type_ids: Optional[torch.Tensor] = None,
1180
+ position_ids: Optional[torch.Tensor] = None,
1181
+ inputs_embeds: Optional[torch.Tensor] = None,
1182
+ labels: Optional[torch.Tensor] = None,
1183
+ output_attentions: Optional[bool] = None,
1184
+ output_hidden_states: Optional[bool] = None,
1185
+ return_dict: Optional[bool] = None,
1186
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1187
+ r"""
1188
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1189
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1190
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1191
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1192
+ """
1193
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1194
+
1195
+ outputs = self.deberta(
1196
+ input_ids,
1197
+ token_type_ids=token_type_ids,
1198
+ attention_mask=attention_mask,
1199
+ position_ids=position_ids,
1200
+ inputs_embeds=inputs_embeds,
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ )
1205
+
1206
+ encoder_layer = outputs[0]
1207
+ pooled_output = self.pooler(encoder_layer)
1208
+ pooled_output = self.dropout(pooled_output)
1209
+ logits = self.classifier(pooled_output)
1210
+
1211
+ loss = None
1212
+ if labels is not None:
1213
+ if self.config.problem_type is None:
1214
+ if self.num_labels == 1:
1215
+ # regression task
1216
+ loss_fn = nn.MSELoss()
1217
+ logits = logits.view(-1).to(labels.dtype)
1218
+ loss = loss_fn(logits, labels.view(-1))
1219
+ elif labels.dim() == 1 or labels.size(-1) == 1:
1220
+ label_index = (labels >= 0).nonzero()
1221
+ labels = labels.long()
1222
+ if label_index.size(0) > 0:
1223
+ labeled_logits = torch.gather(
1224
+ logits, 0, label_index.expand(label_index.size(0), logits.size(1))
1225
+ )
1226
+ labels = torch.gather(labels, 0, label_index.view(-1))
1227
+ loss_fct = CrossEntropyLoss()
1228
+ loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
1229
+ else:
1230
+ loss = torch.tensor(0).to(logits)
1231
+ else:
1232
+ log_softmax = nn.LogSoftmax(-1)
1233
+ loss = -((log_softmax(logits) * labels).sum(-1)).mean()
1234
+ elif self.config.problem_type == "regression":
1235
+ loss_fct = MSELoss()
1236
+ if self.num_labels == 1:
1237
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1238
+ else:
1239
+ loss = loss_fct(logits, labels)
1240
+ elif self.config.problem_type == "single_label_classification":
1241
+ loss_fct = CrossEntropyLoss()
1242
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1243
+ elif self.config.problem_type == "multi_label_classification":
1244
+ loss_fct = BCEWithLogitsLoss()
1245
+ loss = loss_fct(logits, labels)
1246
+ if not return_dict:
1247
+ output = (logits,) + outputs[1:]
1248
+ return ((loss,) + output) if loss is not None else output
1249
+
1250
+ return SequenceClassifierOutput(
1251
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1252
+ )
1253
+
1254
+
1255
+ @add_start_docstrings(
1256
+ """
1257
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1258
+ Named-Entity-Recognition (NER) tasks.
1259
+ """,
1260
+ DEBERTA_START_DOCSTRING,
1261
+ )
1262
+ class DebertaForTokenClassification(DebertaPreTrainedModel):
1263
+ def __init__(self, config):
1264
+ super().__init__(config)
1265
+ self.num_labels = config.num_labels
1266
+
1267
+ self.deberta = DebertaModel(config)
1268
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1269
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1270
+
1271
+ # Initialize weights and apply final processing
1272
+ self.post_init()
1273
+
1274
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1275
+ @add_code_sample_docstrings(
1276
+ checkpoint=_CHECKPOINT_FOR_DOC,
1277
+ output_type=TokenClassifierOutput,
1278
+ config_class=_CONFIG_FOR_DOC,
1279
+ )
1280
+ def forward(
1281
+ self,
1282
+ input_ids: Optional[torch.Tensor] = None,
1283
+ attention_mask: Optional[torch.Tensor] = None,
1284
+ token_type_ids: Optional[torch.Tensor] = None,
1285
+ position_ids: Optional[torch.Tensor] = None,
1286
+ inputs_embeds: Optional[torch.Tensor] = None,
1287
+ labels: Optional[torch.Tensor] = None,
1288
+ output_attentions: Optional[bool] = None,
1289
+ output_hidden_states: Optional[bool] = None,
1290
+ return_dict: Optional[bool] = None,
1291
+ ) -> Union[Tuple, TokenClassifierOutput]:
1292
+ r"""
1293
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1294
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1295
+ """
1296
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1297
+
1298
+ outputs = self.deberta(
1299
+ input_ids,
1300
+ attention_mask=attention_mask,
1301
+ token_type_ids=token_type_ids,
1302
+ position_ids=position_ids,
1303
+ inputs_embeds=inputs_embeds,
1304
+ output_attentions=output_attentions,
1305
+ output_hidden_states=output_hidden_states,
1306
+ return_dict=return_dict,
1307
+ )
1308
+
1309
+ sequence_output = outputs[0]
1310
+
1311
+ sequence_output = self.dropout(sequence_output)
1312
+ logits = self.classifier(sequence_output)
1313
+
1314
+ loss = None
1315
+ if labels is not None:
1316
+ loss_fct = CrossEntropyLoss()
1317
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1318
+
1319
+ if not return_dict:
1320
+ output = (logits,) + outputs[1:]
1321
+ return ((loss,) + output) if loss is not None else output
1322
+
1323
+ return TokenClassifierOutput(
1324
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1325
+ )
1326
+
1327
+
1328
+ @add_start_docstrings(
1329
+ """
1330
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1331
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1332
+ """,
1333
+ DEBERTA_START_DOCSTRING,
1334
+ )
1335
+ class DebertaForQuestionAnswering(DebertaPreTrainedModel):
1336
+ def __init__(self, config):
1337
+ super().__init__(config)
1338
+ self.num_labels = config.num_labels
1339
+
1340
+ self.deberta = DebertaModel(config)
1341
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1342
+
1343
+ # Initialize weights and apply final processing
1344
+ self.post_init()
1345
+
1346
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1347
+ @add_code_sample_docstrings(
1348
+ checkpoint=_CHECKPOINT_FOR_QA,
1349
+ output_type=QuestionAnsweringModelOutput,
1350
+ config_class=_CONFIG_FOR_DOC,
1351
+ expected_output=_QA_EXPECTED_OUTPUT,
1352
+ expected_loss=_QA_EXPECTED_LOSS,
1353
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1354
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1355
+ )
1356
+ def forward(
1357
+ self,
1358
+ input_ids: Optional[torch.Tensor] = None,
1359
+ attention_mask: Optional[torch.Tensor] = None,
1360
+ token_type_ids: Optional[torch.Tensor] = None,
1361
+ position_ids: Optional[torch.Tensor] = None,
1362
+ inputs_embeds: Optional[torch.Tensor] = None,
1363
+ start_positions: Optional[torch.Tensor] = None,
1364
+ end_positions: Optional[torch.Tensor] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ return_dict: Optional[bool] = None,
1368
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1369
+ r"""
1370
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1371
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1372
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1373
+ are not taken into account for computing the loss.
1374
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1377
+ are not taken into account for computing the loss.
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ outputs = self.deberta(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ token_type_ids=token_type_ids,
1385
+ position_ids=position_ids,
1386
+ inputs_embeds=inputs_embeds,
1387
+ output_attentions=output_attentions,
1388
+ output_hidden_states=output_hidden_states,
1389
+ return_dict=return_dict,
1390
+ )
1391
+
1392
+ sequence_output = outputs[0]
1393
+
1394
+ logits = self.qa_outputs(sequence_output)
1395
+ start_logits, end_logits = logits.split(1, dim=-1)
1396
+ start_logits = start_logits.squeeze(-1).contiguous()
1397
+ end_logits = end_logits.squeeze(-1).contiguous()
1398
+
1399
+ total_loss = None
1400
+ if start_positions is not None and end_positions is not None:
1401
+ # If we are on multi-GPU, split add a dimension
1402
+ if len(start_positions.size()) > 1:
1403
+ start_positions = start_positions.squeeze(-1)
1404
+ if len(end_positions.size()) > 1:
1405
+ end_positions = end_positions.squeeze(-1)
1406
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1407
+ ignored_index = start_logits.size(1)
1408
+ start_positions = start_positions.clamp(0, ignored_index)
1409
+ end_positions = end_positions.clamp(0, ignored_index)
1410
+
1411
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1412
+ start_loss = loss_fct(start_logits, start_positions)
1413
+ end_loss = loss_fct(end_logits, end_positions)
1414
+ total_loss = (start_loss + end_loss) / 2
1415
+
1416
+ if not return_dict:
1417
+ output = (start_logits, end_logits) + outputs[1:]
1418
+ return ((total_loss,) + output) if total_loss is not None else output
1419
+
1420
+ return QuestionAnsweringModelOutput(
1421
+ loss=total_loss,
1422
+ start_logits=start_logits,
1423
+ end_logits=end_logits,
1424
+ hidden_states=outputs.hidden_states,
1425
+ attentions=outputs.attentions,
1426
+ )
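Before the TF implementation below, a minimal sketch of how the span logits returned by `DebertaForQuestionAnswering` are typically decoded into an answer string (the checkpoint name and the greedy argmax post-processing are illustrative assumptions, not part of this file):

import torch
from transformers import AutoTokenizer, DebertaForQuestionAnswering

name = "microsoft/deberta-base"  # hypothetical choice; a QA-fine-tuned checkpoint would give meaningful answers
tokenizer = AutoTokenizer.from_pretrained(name)
model = DebertaForQuestionAnswering.from_pretrained(name)

inputs = tokenizer("Who proposed DeBERTa?", "DeBERTa was proposed by Pengcheng He et al.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
start = int(outputs.start_logits.argmax(-1))
end = int(outputs.end_logits.argmax(-1))
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
print(answer)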
venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py ADDED
@@ -0,0 +1,1644 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 DeBERTa model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ from typing import Dict, Optional, Sequence, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFMaskedLMOutput,
30
+ TFQuestionAnsweringModelOutput,
31
+ TFSequenceClassifierOutput,
32
+ TFTokenClassifierOutput,
33
+ )
34
+ from ...modeling_tf_utils import (
35
+ TFMaskedLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFQuestionAnsweringLoss,
39
+ TFSequenceClassificationLoss,
40
+ TFTokenClassificationLoss,
41
+ get_initializer,
42
+ keras,
43
+ unpack_inputs,
44
+ )
45
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
46
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
47
+ from .configuration_deberta import DebertaConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+
53
+ _CONFIG_FOR_DOC = "DebertaConfig"
54
+ _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base"
55
+
56
+
57
+ from ..deprecated._archive_maps import TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ class TFDebertaContextPooler(keras.layers.Layer):
61
+ def __init__(self, config: DebertaConfig, **kwargs):
62
+ super().__init__(**kwargs)
63
+ self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense")
64
+ self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout")
65
+ self.config = config
66
+
67
+ def call(self, hidden_states, training: bool = False):
68
+ # We "pool" the model by simply taking the hidden state corresponding
69
+ # to the first token.
70
+ context_token = hidden_states[:, 0]
71
+ context_token = self.dropout(context_token, training=training)
72
+ pooled_output = self.dense(context_token)
73
+ pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
74
+ return pooled_output
75
+
76
+ @property
77
+ def output_dim(self) -> int:
78
+ return self.config.hidden_size
79
+
80
+ def build(self, input_shape=None):
81
+ if self.built:
82
+ return
83
+ self.built = True
84
+ if getattr(self, "dense", None) is not None:
85
+ with tf.name_scope(self.dense.name):
86
+ self.dense.build([None, None, self.config.pooler_hidden_size])
87
+ if getattr(self, "dropout", None) is not None:
88
+ with tf.name_scope(self.dropout.name):
89
+ self.dropout.build(None)
90
+
91
+
92
+ class TFDebertaXSoftmax(keras.layers.Layer):
93
+ """
94
+ Masked Softmax which is optimized for saving memory
95
+
96
+ Args:
97
+ input (`tf.Tensor`): The input tensor to which softmax will be applied.
98
+ mask (`tf.Tensor`): The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
99
+ dim (int): The dimension along which softmax is applied.
100
+ """
101
+
102
+ def __init__(self, axis=-1, **kwargs):
103
+ super().__init__(**kwargs)
104
+ self.axis = axis
105
+
106
+ def call(self, inputs: tf.Tensor, mask: tf.Tensor):
107
+ rmask = tf.logical_not(tf.cast(mask, tf.bool))
108
+ output = tf.where(rmask, float("-inf"), inputs)
109
+ output = stable_softmax(output, self.axis)
110
+ output = tf.where(rmask, 0.0, output)
111
+ return output
112
+
113
+
114
+ class TFDebertaStableDropout(keras.layers.Layer):
115
+ """
116
+ Optimized dropout module for stabilizing the training
117
+
118
+ Args:
119
+ drop_prob (float): the dropout probability
120
+ """
121
+
122
+ def __init__(self, drop_prob, **kwargs):
123
+ super().__init__(**kwargs)
124
+ self.drop_prob = drop_prob
125
+
126
+ @tf.custom_gradient
127
+ def xdropout(self, inputs):
128
+ """
129
+ Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/(1 - drop_prob).
130
+ """
131
+ mask = tf.cast(
132
+ 1
133
+ - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
134
+ tf.bool,
135
+ )
136
+ scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
137
+ if self.drop_prob > 0:
138
+ inputs = tf.where(mask, 0.0, inputs) * scale
139
+
140
+ def grad(upstream):
141
+ if self.drop_prob > 0:
142
+ return tf.where(mask, 0.0, upstream) * scale
143
+ else:
144
+ return upstream
145
+
146
+ return inputs, grad
147
+
148
+ def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
149
+ if training:
150
+ return self.xdropout(inputs)
151
+ return inputs
152
+
153
+
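A quick numerical check of the inverted-dropout scaling described in `xdropout` above (an illustrative sketch, not part of the module): with `drop_prob = 0.1`, surviving elements are multiplied by 1 / (1 - 0.1) ≈ 1.11, so the expected value of the output matches the input, and the layer is the identity at inference time.

import tensorflow as tf
from transformers.models.deberta.modeling_tf_deberta import TFDebertaStableDropout

layer = TFDebertaStableDropout(drop_prob=0.1)
x = tf.ones((1, 8, 4))
y_train = layer(x, training=True)     # elements are either 0 or ~1.11
y_eval = layer(x, training=False)     # identity when not training
print(float(tf.reduce_mean(y_eval)))  # 1.0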
154
+ class TFDebertaLayerNorm(keras.layers.Layer):
155
+ """LayerNorm module in the TF style (epsilon inside the square root)."""
156
+
157
+ def __init__(self, size, eps=1e-12, **kwargs):
158
+ super().__init__(**kwargs)
159
+ self.size = size
160
+ self.eps = eps
161
+
162
+ def build(self, input_shape):
163
+ self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight")
164
+ self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias")
165
+ return super().build(input_shape)
166
+
167
+ def call(self, x: tf.Tensor) -> tf.Tensor:
168
+ mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
169
+ variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
170
+ std = tf.math.sqrt(variance + self.eps)
171
+ return self.gamma * (x - mean) / std + self.beta
172
+
173
+
174
+ class TFDebertaSelfOutput(keras.layers.Layer):
175
+ def __init__(self, config: DebertaConfig, **kwargs):
176
+ super().__init__(**kwargs)
177
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
178
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
179
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
180
+ self.config = config
181
+
182
+ def call(self, hidden_states, input_tensor, training: bool = False):
183
+ hidden_states = self.dense(hidden_states)
184
+ hidden_states = self.dropout(hidden_states, training=training)
185
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
186
+ return hidden_states
187
+
188
+ def build(self, input_shape=None):
189
+ if self.built:
190
+ return
191
+ self.built = True
192
+ if getattr(self, "dense", None) is not None:
193
+ with tf.name_scope(self.dense.name):
194
+ self.dense.build([None, None, self.config.hidden_size])
195
+ if getattr(self, "LayerNorm", None) is not None:
196
+ with tf.name_scope(self.LayerNorm.name):
197
+ self.LayerNorm.build([None, None, self.config.hidden_size])
198
+ if getattr(self, "dropout", None) is not None:
199
+ with tf.name_scope(self.dropout.name):
200
+ self.dropout.build(None)
201
+
202
+
203
+ class TFDebertaAttention(keras.layers.Layer):
204
+ def __init__(self, config: DebertaConfig, **kwargs):
205
+ super().__init__(**kwargs)
206
+ self.self = TFDebertaDisentangledSelfAttention(config, name="self")
207
+ self.dense_output = TFDebertaSelfOutput(config, name="output")
208
+ self.config = config
209
+
210
+ def call(
211
+ self,
212
+ input_tensor: tf.Tensor,
213
+ attention_mask: tf.Tensor,
214
+ query_states: tf.Tensor = None,
215
+ relative_pos: tf.Tensor = None,
216
+ rel_embeddings: tf.Tensor = None,
217
+ output_attentions: bool = False,
218
+ training: bool = False,
219
+ ) -> Tuple[tf.Tensor]:
220
+ self_outputs = self.self(
221
+ hidden_states=input_tensor,
222
+ attention_mask=attention_mask,
223
+ query_states=query_states,
224
+ relative_pos=relative_pos,
225
+ rel_embeddings=rel_embeddings,
226
+ output_attentions=output_attentions,
227
+ training=training,
228
+ )
229
+ if query_states is None:
230
+ query_states = input_tensor
231
+ attention_output = self.dense_output(
232
+ hidden_states=self_outputs[0], input_tensor=query_states, training=training
233
+ )
234
+
235
+ output = (attention_output,) + self_outputs[1:]
236
+
237
+ return output
238
+
239
+ def build(self, input_shape=None):
240
+ if self.built:
241
+ return
242
+ self.built = True
243
+ if getattr(self, "self", None) is not None:
244
+ with tf.name_scope(self.self.name):
245
+ self.self.build(None)
246
+ if getattr(self, "dense_output", None) is not None:
247
+ with tf.name_scope(self.dense_output.name):
248
+ self.dense_output.build(None)
249
+
250
+
251
+ class TFDebertaIntermediate(keras.layers.Layer):
252
+ def __init__(self, config: DebertaConfig, **kwargs):
253
+ super().__init__(**kwargs)
254
+
255
+ self.dense = keras.layers.Dense(
256
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
257
+ )
258
+
259
+ if isinstance(config.hidden_act, str):
260
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
261
+ else:
262
+ self.intermediate_act_fn = config.hidden_act
263
+ self.config = config
264
+
265
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
266
+ hidden_states = self.dense(inputs=hidden_states)
267
+ hidden_states = self.intermediate_act_fn(hidden_states)
268
+
269
+ return hidden_states
270
+
271
+ def build(self, input_shape=None):
272
+ if self.built:
273
+ return
274
+ self.built = True
275
+ if getattr(self, "dense", None) is not None:
276
+ with tf.name_scope(self.dense.name):
277
+ self.dense.build([None, None, self.config.hidden_size])
278
+
279
+
280
+ class TFDebertaOutput(keras.layers.Layer):
281
+ def __init__(self, config: DebertaConfig, **kwargs):
282
+ super().__init__(**kwargs)
283
+
284
+ self.dense = keras.layers.Dense(
285
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
286
+ )
287
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
288
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
289
+ self.config = config
290
+
291
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
292
+ hidden_states = self.dense(inputs=hidden_states)
293
+ hidden_states = self.dropout(hidden_states, training=training)
294
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
295
+
296
+ return hidden_states
297
+
298
+ def build(self, input_shape=None):
299
+ if self.built:
300
+ return
301
+ self.built = True
302
+ if getattr(self, "dense", None) is not None:
303
+ with tf.name_scope(self.dense.name):
304
+ self.dense.build([None, None, self.config.intermediate_size])
305
+ if getattr(self, "LayerNorm", None) is not None:
306
+ with tf.name_scope(self.LayerNorm.name):
307
+ self.LayerNorm.build([None, None, self.config.hidden_size])
308
+ if getattr(self, "dropout", None) is not None:
309
+ with tf.name_scope(self.dropout.name):
310
+ self.dropout.build(None)
311
+
312
+
313
+ class TFDebertaLayer(keras.layers.Layer):
314
+ def __init__(self, config: DebertaConfig, **kwargs):
315
+ super().__init__(**kwargs)
316
+
317
+ self.attention = TFDebertaAttention(config, name="attention")
318
+ self.intermediate = TFDebertaIntermediate(config, name="intermediate")
319
+ self.bert_output = TFDebertaOutput(config, name="output")
320
+
321
+ def call(
322
+ self,
323
+ hidden_states: tf.Tensor,
324
+ attention_mask: tf.Tensor,
325
+ query_states: tf.Tensor = None,
326
+ relative_pos: tf.Tensor = None,
327
+ rel_embeddings: tf.Tensor = None,
328
+ output_attentions: bool = False,
329
+ training: bool = False,
330
+ ) -> Tuple[tf.Tensor]:
331
+ attention_outputs = self.attention(
332
+ input_tensor=hidden_states,
333
+ attention_mask=attention_mask,
334
+ query_states=query_states,
335
+ relative_pos=relative_pos,
336
+ rel_embeddings=rel_embeddings,
337
+ output_attentions=output_attentions,
338
+ training=training,
339
+ )
340
+ attention_output = attention_outputs[0]
341
+ intermediate_output = self.intermediate(hidden_states=attention_output)
342
+ layer_output = self.bert_output(
343
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
344
+ )
345
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
346
+
347
+ return outputs
348
+
349
+ def build(self, input_shape=None):
350
+ if self.built:
351
+ return
352
+ self.built = True
353
+ if getattr(self, "attention", None) is not None:
354
+ with tf.name_scope(self.attention.name):
355
+ self.attention.build(None)
356
+ if getattr(self, "intermediate", None) is not None:
357
+ with tf.name_scope(self.intermediate.name):
358
+ self.intermediate.build(None)
359
+ if getattr(self, "bert_output", None) is not None:
360
+ with tf.name_scope(self.bert_output.name):
361
+ self.bert_output.build(None)
362
+
363
+
364
+ class TFDebertaEncoder(keras.layers.Layer):
365
+ def __init__(self, config: DebertaConfig, **kwargs):
366
+ super().__init__(**kwargs)
367
+
368
+ self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
369
+ self.relative_attention = getattr(config, "relative_attention", False)
370
+ self.config = config
371
+ if self.relative_attention:
372
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
373
+ if self.max_relative_positions < 1:
374
+ self.max_relative_positions = config.max_position_embeddings
375
+
376
+ def build(self, input_shape=None):
377
+ if self.built:
378
+ return
379
+ self.built = True
380
+ if self.relative_attention:
381
+ self.rel_embeddings = self.add_weight(
382
+ name="rel_embeddings.weight",
383
+ shape=[self.max_relative_positions * 2, self.config.hidden_size],
384
+ initializer=get_initializer(self.config.initializer_range),
385
+ )
386
+ if getattr(self, "layer", None) is not None:
387
+ for layer in self.layer:
388
+ with tf.name_scope(layer.name):
389
+ layer.build(None)
390
+
391
+ def get_rel_embedding(self):
392
+ rel_embeddings = self.rel_embeddings if self.relative_attention else None
393
+ return rel_embeddings
394
+
395
+ def get_attention_mask(self, attention_mask):
396
+ if len(shape_list(attention_mask)) <= 2:
397
+ extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
398
+ attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
399
+ attention_mask = tf.cast(attention_mask, tf.uint8)
400
+ elif len(shape_list(attention_mask)) == 3:
401
+ attention_mask = tf.expand_dims(attention_mask, 1)
402
+
403
+ return attention_mask
404
+
405
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
406
+ if self.relative_attention and relative_pos is None:
407
+ q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
408
+ relative_pos = build_relative_position(q, shape_list(hidden_states)[-2])
409
+ return relative_pos
410
+
411
+ def call(
412
+ self,
413
+ hidden_states: tf.Tensor,
414
+ attention_mask: tf.Tensor,
415
+ query_states: tf.Tensor = None,
416
+ relative_pos: tf.Tensor = None,
417
+ output_attentions: bool = False,
418
+ output_hidden_states: bool = False,
419
+ return_dict: bool = True,
420
+ training: bool = False,
421
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
422
+ all_hidden_states = () if output_hidden_states else None
423
+ all_attentions = () if output_attentions else None
424
+
425
+ attention_mask = self.get_attention_mask(attention_mask)
426
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
427
+
428
+ if isinstance(hidden_states, Sequence):
429
+ next_kv = hidden_states[0]
430
+ else:
431
+ next_kv = hidden_states
432
+
433
+ rel_embeddings = self.get_rel_embedding()
434
+
435
+ for i, layer_module in enumerate(self.layer):
436
+ if output_hidden_states:
437
+ all_hidden_states = all_hidden_states + (hidden_states,)
438
+
439
+ layer_outputs = layer_module(
440
+ hidden_states=next_kv,
441
+ attention_mask=attention_mask,
442
+ query_states=query_states,
443
+ relative_pos=relative_pos,
444
+ rel_embeddings=rel_embeddings,
445
+ output_attentions=output_attentions,
446
+ training=training,
447
+ )
448
+ hidden_states = layer_outputs[0]
449
+
450
+ if query_states is not None:
451
+ query_states = hidden_states
452
+ if isinstance(hidden_states, Sequence):
453
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
454
+ else:
455
+ next_kv = hidden_states
456
+
457
+ if output_attentions:
458
+ all_attentions = all_attentions + (layer_outputs[1],)
459
+
460
+ # Add last layer
461
+ if output_hidden_states:
462
+ all_hidden_states = all_hidden_states + (hidden_states,)
463
+
464
+ if not return_dict:
465
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
466
+
467
+ return TFBaseModelOutput(
468
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
469
+ )
470
+
471
+
472
+ def build_relative_position(query_size, key_size):
473
+ """
474
+ Build relative position according to the query and key
475
+
476
+ We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
477
+ \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
478
+ P_k\\)
479
+
480
+ Args:
481
+ query_size (int): the length of query
482
+ key_size (int): the length of key
483
+
484
+ Return:
485
+ `tf.Tensor`: A tensor with shape [1, query_size, key_size]
486
+
487
+ """
488
+ q_ids = tf.range(query_size, dtype=tf.int32)
489
+ k_ids = tf.range(key_size, dtype=tf.int32)
490
+ rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])
491
+ rel_pos_ids = rel_pos_ids[:query_size, :]
492
+ rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
493
+ return tf.cast(rel_pos_ids, tf.int64)
494
+
495
+
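For concreteness, a small worked example of `build_relative_position` above (illustrative only; it imports the function from this module): entry [0, i, j] is simply i - j.

from transformers.models.deberta.modeling_tf_deberta import build_relative_position

rel = build_relative_position(query_size=3, key_size=4)
print(rel.shape)       # (1, 3, 4)
print(rel[0].numpy())
# [[ 0 -1 -2 -3]
#  [ 1  0 -1 -2]
#  [ 2  1  0 -1]]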
496
+ def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
497
+ shapes = [
498
+ shape_list(query_layer)[0],
499
+ shape_list(query_layer)[1],
500
+ shape_list(query_layer)[2],
501
+ shape_list(relative_pos)[-1],
502
+ ]
503
+ return tf.broadcast_to(c2p_pos, shapes)
504
+
505
+
506
+ def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
507
+ shapes = [
508
+ shape_list(query_layer)[0],
509
+ shape_list(query_layer)[1],
510
+ shape_list(key_layer)[-2],
511
+ shape_list(key_layer)[-2],
512
+ ]
513
+ return tf.broadcast_to(c2p_pos, shapes)
514
+
515
+
516
+ def pos_dynamic_expand(pos_index, p2c_att, key_layer):
517
+ shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
518
+ return tf.broadcast_to(pos_index, shapes)
519
+
520
+
521
+ def torch_gather(x, indices, gather_axis):
522
+ if gather_axis < 0:
523
+ gather_axis = tf.rank(x) + gather_axis
524
+
525
+ if gather_axis != tf.rank(x) - 1:
526
+ pre_roll = tf.rank(x) - 1 - gather_axis
527
+ permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)
528
+ x = tf.transpose(x, perm=permutation)
529
+ indices = tf.transpose(indices, perm=permutation)
530
+ else:
531
+ pre_roll = 0
532
+
533
+ flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))
534
+ flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))
535
+ gathered = tf.gather(flat_x, flat_indices, batch_dims=1)
536
+ gathered = tf.reshape(gathered, tf.shape(indices))
537
+
538
+ if pre_roll != 0:
539
+ permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)
540
+ gathered = tf.transpose(gathered, perm=permutation)
541
+
542
+ return gathered
543
+
544
+
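A quick sanity sketch of `torch_gather` above (illustrative, not part of the module): it reproduces PyTorch's `torch.gather` semantics, i.e. `out[i, j] = x[i, indices[i, j]]` when gathering along the last axis.

import tensorflow as tf
from transformers.models.deberta.modeling_tf_deberta import torch_gather

x = tf.constant([[10, 11, 12], [20, 21, 22]])
indices = tf.constant([[2, 0], [1, 1]])
print(torch_gather(x, indices, gather_axis=-1).numpy())
# [[12 10]
#  [21 21]]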
545
+ class TFDebertaDisentangledSelfAttention(keras.layers.Layer):
546
+ """
547
+ Disentangled self-attention module
548
+
549
+ Parameters:
550
+ config ([`DebertaConfig`]):
551
+ A model config class instance with the configuration to build a new model. The schema is similar to
552
+ *BertConfig*; for more details, please refer to [`DebertaConfig`]
553
+
554
+ """
555
+
556
+ def __init__(self, config: DebertaConfig, **kwargs):
557
+ super().__init__(**kwargs)
558
+ if config.hidden_size % config.num_attention_heads != 0:
559
+ raise ValueError(
560
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
561
+ f"heads ({config.num_attention_heads})"
562
+ )
563
+ self.num_attention_heads = config.num_attention_heads
564
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
565
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
566
+ self.in_proj = keras.layers.Dense(
567
+ self.all_head_size * 3,
568
+ kernel_initializer=get_initializer(config.initializer_range),
569
+ name="in_proj",
570
+ use_bias=False,
571
+ )
572
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
573
+
574
+ self.relative_attention = getattr(config, "relative_attention", False)
575
+ self.talking_head = getattr(config, "talking_head", False)
576
+
577
+ if self.talking_head:
578
+ self.head_logits_proj = keras.layers.Dense(
579
+ self.num_attention_heads,
580
+ kernel_initializer=get_initializer(config.initializer_range),
581
+ name="head_logits_proj",
582
+ use_bias=False,
583
+ )
584
+ self.head_weights_proj = keras.layers.Dense(
585
+ self.num_attention_heads,
586
+ kernel_initializer=get_initializer(config.initializer_range),
587
+ name="head_weights_proj",
588
+ use_bias=False,
589
+ )
590
+
591
+ self.softmax = TFDebertaXSoftmax(axis=-1)
592
+
593
+ if self.relative_attention:
594
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
595
+ if self.max_relative_positions < 1:
596
+ self.max_relative_positions = config.max_position_embeddings
597
+ self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout")
598
+ if "c2p" in self.pos_att_type:
599
+ self.pos_proj = keras.layers.Dense(
600
+ self.all_head_size,
601
+ kernel_initializer=get_initializer(config.initializer_range),
602
+ name="pos_proj",
603
+ use_bias=False,
604
+ )
605
+ if "p2c" in self.pos_att_type:
606
+ self.pos_q_proj = keras.layers.Dense(
607
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj"
608
+ )
609
+
610
+ self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout")
611
+ self.config = config
612
+
613
+ def build(self, input_shape=None):
614
+ if self.built:
615
+ return
616
+ self.built = True
617
+ self.q_bias = self.add_weight(
618
+ name="q_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
619
+ )
620
+ self.v_bias = self.add_weight(
621
+ name="v_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
622
+ )
623
+ if getattr(self, "in_proj", None) is not None:
624
+ with tf.name_scope(self.in_proj.name):
625
+ self.in_proj.build([None, None, self.config.hidden_size])
626
+ if getattr(self, "dropout", None) is not None:
627
+ with tf.name_scope(self.dropout.name):
628
+ self.dropout.build(None)
629
+ if getattr(self, "head_logits_proj", None) is not None:
630
+ with tf.name_scope(self.head_logits_proj.name):
631
+ self.head_logits_proj.build(None)
632
+ if getattr(self, "head_weights_proj", None) is not None:
633
+ with tf.name_scope(self.head_weights_proj.name):
634
+ self.head_weights_proj.build(None)
635
+ if getattr(self, "pos_dropout", None) is not None:
636
+ with tf.name_scope(self.pos_dropout.name):
637
+ self.pos_dropout.build(None)
638
+ if getattr(self, "pos_proj", None) is not None:
639
+ with tf.name_scope(self.pos_proj.name):
640
+ self.pos_proj.build([self.config.hidden_size])
641
+ if getattr(self, "pos_q_proj", None) is not None:
642
+ with tf.name_scope(self.pos_q_proj.name):
643
+ self.pos_q_proj.build([self.config.hidden_size])
644
+
645
+ def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor:
646
+ shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1]
647
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
648
+ tensor = tf.reshape(tensor=tensor, shape=shape)
649
+
650
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
651
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
652
+
653
+ def call(
654
+ self,
655
+ hidden_states: tf.Tensor,
656
+ attention_mask: tf.Tensor,
657
+ query_states: tf.Tensor = None,
658
+ relative_pos: tf.Tensor = None,
659
+ rel_embeddings: tf.Tensor = None,
660
+ output_attentions: bool = False,
661
+ training: bool = False,
662
+ ) -> Tuple[tf.Tensor]:
663
+ """
664
+ Call the module
665
+
666
+ Args:
667
+ hidden_states (`tf.Tensor`):
668
+ Input states to the module, usually the output of the previous layer; they will be the Q, K and V in
669
+ *Attention(Q,K,V)*
670
+
671
+ attention_mask (`tf.Tensor`):
672
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
673
+ sequence length, in which element [i,j] = *1* means that the *i*-th token in the input can attend to the
674
+ *j*-th token.
675
+
676
+ output_attentions (`bool`, *optional*):
677
+ Whether to return the attention matrix.
678
+
679
+ query_states (`tf.Tensor`, optional):
680
+ The *Q* state in *Attention(Q,K,V)*.
681
+
682
+ relative_pos (`tf.Tensor`):
683
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
684
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
685
+
686
+ rel_embeddings (`tf.Tensor`):
687
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
688
+ \\text{max_relative_positions}\\), *hidden_size*].
689
+
690
+
691
+ """
692
+ if query_states is None:
693
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
694
+ query_layer, key_layer, value_layer = tf.split(
695
+ self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1
696
+ )
697
+ else:
698
+
699
+ def linear(w, b, x):
700
+ out = tf.matmul(x, w, transpose_b=True)
701
+ if b is not None:
702
+ out += tf.transpose(b)
703
+ return out
704
+
705
+ ws = tf.split(
706
+ tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0
707
+ )
708
+ qkvw = tf.TensorArray(dtype=tf.float32, size=3)
709
+ for k in tf.range(3):
710
+ qkvw_inside = tf.TensorArray(dtype=tf.float32, size=self.num_attention_heads)
711
+ for i in tf.range(self.num_attention_heads):
712
+ qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k])
713
+ qkvw = qkvw.write(k, qkvw_inside.concat())
714
+ qkvb = [None] * 3
715
+
716
+ q = linear(qkvw[0], qkvb[0], query_states)
717
+ k = linear(qkvw[1], qkvb[1], hidden_states)
718
+ v = linear(qkvw[2], qkvb[2], hidden_states)
719
+ query_layer = self.transpose_for_scores(q)
720
+ key_layer = self.transpose_for_scores(k)
721
+ value_layer = self.transpose_for_scores(v)
722
+
723
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
724
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
725
+
726
+ rel_att = None
727
+ # Take the dot product between "query" and "key" to get the raw attention scores.
728
+ scale_factor = 1 + len(self.pos_att_type)
729
+ scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor)
730
+ query_layer = query_layer / scale
731
+
732
+ attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
733
+ if self.relative_attention:
734
+ rel_embeddings = self.pos_dropout(rel_embeddings, training=training)
735
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
736
+
737
+ if rel_att is not None:
738
+ attention_scores = attention_scores + rel_att
739
+
740
+ if self.talking_head:
741
+ attention_scores = tf.transpose(
742
+ self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2]
743
+ )
744
+
745
+ attention_probs = self.softmax(attention_scores, attention_mask)
746
+ attention_probs = self.dropout(attention_probs, training=training)
747
+ if self.talking_head:
748
+ attention_probs = tf.transpose(
749
+ self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2]
750
+ )
751
+
752
+ context_layer = tf.matmul(attention_probs, value_layer)
753
+ context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
754
+ context_layer_shape = shape_list(context_layer)
755
+ # Set the final dimension here explicitly.
756
+ # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
757
+ # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
758
+ # requires final input dimension to be defined
759
+ new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
760
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
761
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
762
+ return outputs
763
+
764
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
765
+ if relative_pos is None:
766
+ q = shape_list(query_layer)[-2]
767
+ relative_pos = build_relative_position(q, shape_list(key_layer)[-2])
768
+ shape_list_pos = shape_list(relative_pos)
769
+ if len(shape_list_pos) == 2:
770
+ relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
771
+ elif len(shape_list_pos) == 3:
772
+ relative_pos = tf.expand_dims(relative_pos, 1)
773
+ # bxhxqxk
774
+ elif len(shape_list_pos) != 4:
775
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
776
+
777
+ att_span = tf.cast(
778
+ tf.minimum(
779
+ tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions
780
+ ),
781
+ tf.int64,
782
+ )
783
+ rel_embeddings = tf.expand_dims(
784
+ rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0
785
+ )
786
+
787
+ score = 0
788
+
789
+ # content->position
790
+ if "c2p" in self.pos_att_type:
791
+ pos_key_layer = self.pos_proj(rel_embeddings)
792
+ pos_key_layer = self.transpose_for_scores(pos_key_layer)
793
+ c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2]))
794
+ c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
795
+ c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1)
796
+ score += c2p_att
797
+
798
+ # position->content
799
+ if "p2c" in self.pos_att_type:
800
+ pos_query_layer = self.pos_q_proj(rel_embeddings)
801
+ pos_query_layer = self.transpose_for_scores(pos_query_layer)
802
+ pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=tf.float32))
803
+ if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
804
+ r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2])
805
+ else:
806
+ r_pos = relative_pos
807
+ p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
808
+ p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2]))
809
+ p2c_att = tf.transpose(
810
+ torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2]
811
+ )
812
+ if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
813
+ pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1)
814
+ p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2)
815
+ score += p2c_att
816
+
817
+ return score
818
+
819
+
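Note how `call` scales the query by `sqrt(head_dim * scale_factor)`, where `scale_factor` grows with the number of relative-attention terms, so the content-to-content, content-to-position and position-to-content scores share one normalization. A small sketch of the numbers, assuming a hypothetical 768-dim model with 12 heads and `pos_att_type=["c2p", "p2c"]`:

```python
import math

head_dim = 768 // 12                      # attention_head_size
scale_factor = 1 + len(["c2p", "p2c"])    # content term plus two relative terms
scale = math.sqrt(head_dim * scale_factor)
print(round(scale, 2), math.sqrt(head_dim))  # 13.86 vs. 8.0 for standard attention
```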
820
+ class TFDebertaEmbeddings(keras.layers.Layer):
821
+ """Construct the embeddings from word, position and token_type embeddings."""
822
+
823
+ def __init__(self, config, **kwargs):
824
+ super().__init__(**kwargs)
825
+
826
+ self.config = config
827
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
828
+ self.hidden_size = config.hidden_size
829
+ self.max_position_embeddings = config.max_position_embeddings
830
+ self.position_biased_input = getattr(config, "position_biased_input", True)
831
+ self.initializer_range = config.initializer_range
832
+ if self.embedding_size != config.hidden_size:
833
+ self.embed_proj = keras.layers.Dense(
834
+ config.hidden_size,
835
+ kernel_initializer=get_initializer(config.initializer_range),
836
+ name="embed_proj",
837
+ use_bias=False,
838
+ )
839
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
840
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
841
+
842
+ def build(self, input_shape=None):
843
+ with tf.name_scope("word_embeddings"):
844
+ self.weight = self.add_weight(
845
+ name="weight",
846
+ shape=[self.config.vocab_size, self.embedding_size],
847
+ initializer=get_initializer(self.initializer_range),
848
+ )
849
+
850
+ with tf.name_scope("token_type_embeddings"):
851
+ if self.config.type_vocab_size > 0:
852
+ self.token_type_embeddings = self.add_weight(
853
+ name="embeddings",
854
+ shape=[self.config.type_vocab_size, self.embedding_size],
855
+ initializer=get_initializer(self.initializer_range),
856
+ )
857
+ else:
858
+ self.token_type_embeddings = None
859
+
860
+ with tf.name_scope("position_embeddings"):
861
+ if self.position_biased_input:
862
+ self.position_embeddings = self.add_weight(
863
+ name="embeddings",
864
+ shape=[self.max_position_embeddings, self.hidden_size],
865
+ initializer=get_initializer(self.initializer_range),
866
+ )
867
+ else:
868
+ self.position_embeddings = None
869
+
870
+ if self.built:
871
+ return
872
+ self.built = True
873
+ if getattr(self, "LayerNorm", None) is not None:
874
+ with tf.name_scope(self.LayerNorm.name):
875
+ self.LayerNorm.build([None, None, self.config.hidden_size])
876
+ if getattr(self, "dropout", None) is not None:
877
+ with tf.name_scope(self.dropout.name):
878
+ self.dropout.build(None)
879
+ if getattr(self, "embed_proj", None) is not None:
880
+ with tf.name_scope(self.embed_proj.name):
881
+ self.embed_proj.build([None, None, self.embedding_size])
882
+
883
+ def call(
884
+ self,
885
+ input_ids: tf.Tensor = None,
886
+ position_ids: tf.Tensor = None,
887
+ token_type_ids: tf.Tensor = None,
888
+ inputs_embeds: tf.Tensor = None,
889
+ mask: tf.Tensor = None,
890
+ training: bool = False,
891
+ ) -> tf.Tensor:
892
+ """
893
+ Applies embeddings based on the input tensors.
894
+
895
+ Returns:
896
+ final_embeddings (`tf.Tensor`): output embedding tensor.
897
+ """
898
+ if input_ids is None and inputs_embeds is None:
899
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
900
+
901
+ if input_ids is not None:
902
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
903
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
904
+
905
+ input_shape = shape_list(inputs_embeds)[:-1]
906
+
907
+ if token_type_ids is None:
908
+ token_type_ids = tf.fill(dims=input_shape, value=0)
909
+
910
+ if position_ids is None:
911
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
912
+
913
+ final_embeddings = inputs_embeds
914
+ if self.position_biased_input:
915
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
916
+ final_embeddings += position_embeds
917
+ if self.config.type_vocab_size > 0:
918
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
919
+ final_embeddings += token_type_embeds
920
+
921
+ if self.embedding_size != self.hidden_size:
922
+ final_embeddings = self.embed_proj(final_embeddings)
923
+
924
+ final_embeddings = self.LayerNorm(final_embeddings)
925
+
926
+ if mask is not None:
927
+ if len(shape_list(mask)) != len(shape_list(final_embeddings)):
928
+ if len(shape_list(mask)) == 4:
929
+ mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
930
+ mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
931
+
932
+ final_embeddings = final_embeddings * mask
933
+
934
+ final_embeddings = self.dropout(final_embeddings, training=training)
935
+
936
+ return final_embeddings
937
+
938
+
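One detail in the embedding layer worth a sketch: when `mask` arrives with the extended 4-D attention-mask shape, it is squeezed back to 2-D, given a trailing axis, and multiplied into the embeddings, zeroing out padded positions. A minimal illustration with made-up values:

```python
import tensorflow as tf

mask = tf.constant([[1, 1, 0]])[:, None, None, :]         # [batch, 1, 1, seq]; last token is padding
embeddings = tf.ones([1, 3, 4])                           # [batch, seq, hidden]
mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)       # back to [batch, seq]
mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)  # [batch, seq, 1]
print((embeddings * mask)[0, :, 0].numpy())               # [1. 1. 0.]
```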
939
+ class TFDebertaPredictionHeadTransform(keras.layers.Layer):
940
+ def __init__(self, config: DebertaConfig, **kwargs):
941
+ super().__init__(**kwargs)
942
+
943
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
944
+
945
+ self.dense = keras.layers.Dense(
946
+ units=self.embedding_size,
947
+ kernel_initializer=get_initializer(config.initializer_range),
948
+ name="dense",
949
+ )
950
+
951
+ if isinstance(config.hidden_act, str):
952
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
953
+ else:
954
+ self.transform_act_fn = config.hidden_act
955
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
956
+ self.config = config
957
+
958
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
959
+ hidden_states = self.dense(inputs=hidden_states)
960
+ hidden_states = self.transform_act_fn(hidden_states)
961
+ hidden_states = self.LayerNorm(hidden_states)
962
+
963
+ return hidden_states
964
+
965
+ def build(self, input_shape=None):
966
+ if self.built:
967
+ return
968
+ self.built = True
969
+ if getattr(self, "dense", None) is not None:
970
+ with tf.name_scope(self.dense.name):
971
+ self.dense.build([None, None, self.config.hidden_size])
972
+ if getattr(self, "LayerNorm", None) is not None:
973
+ with tf.name_scope(self.LayerNorm.name):
974
+ self.LayerNorm.build([None, None, self.embedding_size])
975
+
976
+
977
+ class TFDebertaLMPredictionHead(keras.layers.Layer):
978
+ def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
979
+ super().__init__(**kwargs)
980
+
981
+ self.config = config
982
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
983
+
984
+ self.transform = TFDebertaPredictionHeadTransform(config, name="transform")
985
+
986
+ # The output weights are the same as the input embeddings, but there is
987
+ # an output-only bias for each token.
988
+ self.input_embeddings = input_embeddings
989
+
990
+ def build(self, input_shape=None):
991
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
992
+
993
+ if self.built:
994
+ return
995
+ self.built = True
996
+ if getattr(self, "transform", None) is not None:
997
+ with tf.name_scope(self.transform.name):
998
+ self.transform.build(None)
999
+
1000
+ def get_output_embeddings(self) -> keras.layers.Layer:
1001
+ return self.input_embeddings
1002
+
1003
+ def set_output_embeddings(self, value: tf.Variable):
1004
+ self.input_embeddings.weight = value
1005
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1006
+
1007
+ def get_bias(self) -> Dict[str, tf.Variable]:
1008
+ return {"bias": self.bias}
1009
+
1010
+ def set_bias(self, value: tf.Variable):
1011
+ self.bias = value["bias"]
1012
+ self.config.vocab_size = shape_list(value["bias"])[0]
1013
+
1014
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1015
+ hidden_states = self.transform(hidden_states=hidden_states)
1016
+ seq_length = shape_list(hidden_states)[1]
1017
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
1018
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1019
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1020
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1021
+
1022
+ return hidden_states
1023
+
1024
+
1025
+ class TFDebertaOnlyMLMHead(keras.layers.Layer):
1026
+ def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
1027
+ super().__init__(**kwargs)
1028
+ self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions")
1029
+
1030
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1031
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1032
+
1033
+ return prediction_scores
1034
+
1035
+ def build(self, input_shape=None):
1036
+ if self.built:
1037
+ return
1038
+ self.built = True
1039
+ if getattr(self, "predictions", None) is not None:
1040
+ with tf.name_scope(self.predictions.name):
1041
+ self.predictions.build(None)
1042
+
1043
+
1044
+ # @keras_serializable
1045
+ class TFDebertaMainLayer(keras.layers.Layer):
1046
+ config_class = DebertaConfig
1047
+
1048
+ def __init__(self, config: DebertaConfig, **kwargs):
1049
+ super().__init__(**kwargs)
1050
+
1051
+ self.config = config
1052
+
1053
+ self.embeddings = TFDebertaEmbeddings(config, name="embeddings")
1054
+ self.encoder = TFDebertaEncoder(config, name="encoder")
1055
+
1056
+ def get_input_embeddings(self) -> keras.layers.Layer:
1057
+ return self.embeddings
1058
+
1059
+ def set_input_embeddings(self, value: tf.Variable):
1060
+ self.embeddings.weight = value
1061
+ self.embeddings.vocab_size = shape_list(value)[0]
1062
+
1063
+ def _prune_heads(self, heads_to_prune):
1064
+ """
1065
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
1066
+ class PreTrainedModel
1067
+ """
1068
+ raise NotImplementedError
1069
+
1070
+ @unpack_inputs
1071
+ def call(
1072
+ self,
1073
+ input_ids: TFModelInputType | None = None,
1074
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1075
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1076
+ position_ids: np.ndarray | tf.Tensor | None = None,
1077
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1078
+ output_attentions: Optional[bool] = None,
1079
+ output_hidden_states: Optional[bool] = None,
1080
+ return_dict: Optional[bool] = None,
1081
+ training: bool = False,
1082
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1083
+ if input_ids is not None and inputs_embeds is not None:
1084
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1085
+ elif input_ids is not None:
1086
+ input_shape = shape_list(input_ids)
1087
+ elif inputs_embeds is not None:
1088
+ input_shape = shape_list(inputs_embeds)[:-1]
1089
+ else:
1090
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1091
+
1092
+ if attention_mask is None:
1093
+ attention_mask = tf.fill(dims=input_shape, value=1)
1094
+
1095
+ if token_type_ids is None:
1096
+ token_type_ids = tf.fill(dims=input_shape, value=0)
1097
+
1098
+ embedding_output = self.embeddings(
1099
+ input_ids=input_ids,
1100
+ position_ids=position_ids,
1101
+ token_type_ids=token_type_ids,
1102
+ inputs_embeds=inputs_embeds,
1103
+ mask=attention_mask,
1104
+ training=training,
1105
+ )
1106
+
1107
+ encoder_outputs = self.encoder(
1108
+ hidden_states=embedding_output,
1109
+ attention_mask=attention_mask,
1110
+ output_attentions=output_attentions,
1111
+ output_hidden_states=output_hidden_states,
1112
+ return_dict=return_dict,
1113
+ training=training,
1114
+ )
1115
+
1116
+ sequence_output = encoder_outputs[0]
1117
+
1118
+ if not return_dict:
1119
+ return (sequence_output,) + encoder_outputs[1:]
1120
+
1121
+ return TFBaseModelOutput(
1122
+ last_hidden_state=sequence_output,
1123
+ hidden_states=encoder_outputs.hidden_states,
1124
+ attentions=encoder_outputs.attentions,
1125
+ )
1126
+
1127
+ def build(self, input_shape=None):
1128
+ if self.built:
1129
+ return
1130
+ self.built = True
1131
+ if getattr(self, "embeddings", None) is not None:
1132
+ with tf.name_scope(self.embeddings.name):
1133
+ self.embeddings.build(None)
1134
+ if getattr(self, "encoder", None) is not None:
1135
+ with tf.name_scope(self.encoder.name):
1136
+ self.encoder.build(None)
1137
+
1138
+
1139
+ class TFDebertaPreTrainedModel(TFPreTrainedModel):
1140
+ """
1141
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1142
+ models.
1143
+ """
1144
+
1145
+ config_class = DebertaConfig
1146
+ base_model_prefix = "deberta"
1147
+
1148
+
1149
+ DEBERTA_START_DOCSTRING = r"""
1150
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
1151
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
1152
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
1153
+ improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
1154
+
1155
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1156
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1157
+ behavior.
1158
+
1159
+ <Tip>
1160
+
1161
+ TensorFlow models and layers in `transformers` accept two formats as input:
1162
+
1163
+ - having all inputs as keyword arguments (like PyTorch models), or
1164
+ - having all inputs as a list, tuple or dict in the first positional argument.
1165
+
1166
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1167
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1168
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1169
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1170
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1171
+ positional argument:
1172
+
1173
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1174
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1175
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1176
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1177
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1178
+
1179
+ Note that when creating models and layers with
1180
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1181
+ about any of this, as you can just pass inputs like you would to any other Python function!
1182
+
1183
+ </Tip>
1184
+
1185
+ Parameters:
1186
+ config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
1187
+ Initializing with a config file does not load the weights associated with the model, only the
1188
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1189
+ """
1190
+
1191
+ DEBERTA_INPUTS_DOCSTRING = r"""
1192
+ Args:
1193
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, with each example having the shape `({0})`):
1194
+ Indices of input sequence tokens in the vocabulary.
1195
+
1196
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1197
+ [`PreTrainedTokenizer.__call__`] for details.
1198
+
1199
+ [What are input IDs?](../glossary#input-ids)
1200
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1201
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1202
+
1203
+ - 1 for tokens that are **not masked**,
1204
+ - 0 for tokens that are **masked**.
1205
+
1206
+ [What are attention masks?](../glossary#attention-mask)
1207
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1208
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1209
+ 1]`:
1210
+
1211
+ - 0 corresponds to a *sentence A* token,
1212
+ - 1 corresponds to a *sentence B* token.
1213
+
1214
+ [What are token type IDs?](../glossary#token-type-ids)
1215
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1216
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1217
+ config.max_position_embeddings - 1]`.
1218
+
1219
+ [What are position IDs?](../glossary#position-ids)
1220
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1221
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1222
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
1223
+ model's internal embedding lookup matrix.
1224
+ output_attentions (`bool`, *optional*):
1225
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1226
+ tensors for more detail.
1227
+ output_hidden_states (`bool`, *optional*):
1228
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1229
+ more detail.
1230
+ return_dict (`bool`, *optional*):
1231
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1232
+ """
1233
+
1234
+
1235
+ @add_start_docstrings(
1236
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
1237
+ DEBERTA_START_DOCSTRING,
1238
+ )
1239
+ class TFDebertaModel(TFDebertaPreTrainedModel):
1240
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1241
+ super().__init__(config, *inputs, **kwargs)
1242
+
1243
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1244
+
1245
+ @unpack_inputs
1246
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1247
+ @add_code_sample_docstrings(
1248
+ checkpoint=_CHECKPOINT_FOR_DOC,
1249
+ output_type=TFBaseModelOutput,
1250
+ config_class=_CONFIG_FOR_DOC,
1251
+ )
1252
+ def call(
1253
+ self,
1254
+ input_ids: TFModelInputType | None = None,
1255
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1256
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1257
+ position_ids: np.ndarray | tf.Tensor | None = None,
1258
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1259
+ output_attentions: Optional[bool] = None,
1260
+ output_hidden_states: Optional[bool] = None,
1261
+ return_dict: Optional[bool] = None,
1262
+ training: Optional[bool] = False,
1263
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1264
+ outputs = self.deberta(
1265
+ input_ids=input_ids,
1266
+ attention_mask=attention_mask,
1267
+ token_type_ids=token_type_ids,
1268
+ position_ids=position_ids,
1269
+ inputs_embeds=inputs_embeds,
1270
+ output_attentions=output_attentions,
1271
+ output_hidden_states=output_hidden_states,
1272
+ return_dict=return_dict,
1273
+ training=training,
1274
+ )
1275
+
1276
+ return outputs
1277
+
1278
+ def build(self, input_shape=None):
1279
+ if self.built:
1280
+ return
1281
+ self.built = True
1282
+ if getattr(self, "deberta", None) is not None:
1283
+ with tf.name_scope(self.deberta.name):
1284
+ self.deberta.build(None)
1285
+
1286
+
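A hedged end-to-end usage sketch for the bare model, assuming the public `microsoft/deberta-base` checkpoint (the one used in the tokenizer example later in this diff); any of the input formats described in `DEBERTA_START_DOCSTRING` (single tensor, list, or dict) would work equally well:

```python
from transformers import AutoTokenizer, TFDebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(inputs)                 # dict input format
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)
```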
1287
+ @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
1288
+ class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss):
1289
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1290
+ super().__init__(config, *inputs, **kwargs)
1291
+
1292
+ if config.is_decoder:
1293
+ logger.warning(
1294
+ "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for "
1295
+ "bi-directional self-attention."
1296
+ )
1297
+
1298
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1299
+ self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
1300
+
1301
+ def get_lm_head(self) -> keras.layers.Layer:
1302
+ return self.mlm.predictions
1303
+
1304
+ @unpack_inputs
1305
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1306
+ @add_code_sample_docstrings(
1307
+ checkpoint=_CHECKPOINT_FOR_DOC,
1308
+ output_type=TFMaskedLMOutput,
1309
+ config_class=_CONFIG_FOR_DOC,
1310
+ )
1311
+ def call(
1312
+ self,
1313
+ input_ids: TFModelInputType | None = None,
1314
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1315
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1316
+ position_ids: np.ndarray | tf.Tensor | None = None,
1317
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1318
+ output_attentions: Optional[bool] = None,
1319
+ output_hidden_states: Optional[bool] = None,
1320
+ return_dict: Optional[bool] = None,
1321
+ labels: np.ndarray | tf.Tensor | None = None,
1322
+ training: Optional[bool] = False,
1323
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1324
+ r"""
1325
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1326
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1327
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1328
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1329
+ """
1330
+ outputs = self.deberta(
1331
+ input_ids=input_ids,
1332
+ attention_mask=attention_mask,
1333
+ token_type_ids=token_type_ids,
1334
+ position_ids=position_ids,
1335
+ inputs_embeds=inputs_embeds,
1336
+ output_attentions=output_attentions,
1337
+ output_hidden_states=output_hidden_states,
1338
+ return_dict=return_dict,
1339
+ training=training,
1340
+ )
1341
+ sequence_output = outputs[0]
1342
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1343
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1344
+
1345
+ if not return_dict:
1346
+ output = (prediction_scores,) + outputs[2:]
1347
+ return ((loss,) + output) if loss is not None else output
1348
+
1349
+ return TFMaskedLMOutput(
1350
+ loss=loss,
1351
+ logits=prediction_scores,
1352
+ hidden_states=outputs.hidden_states,
1353
+ attentions=outputs.attentions,
1354
+ )
1355
+
1356
+ def build(self, input_shape=None):
1357
+ if self.built:
1358
+ return
1359
+ self.built = True
1360
+ if getattr(self, "deberta", None) is not None:
1361
+ with tf.name_scope(self.deberta.name):
1362
+ self.deberta.build(None)
1363
+ if getattr(self, "mlm", None) is not None:
1364
+ with tf.name_scope(self.mlm.name):
1365
+ self.mlm.build(None)
1366
+
1367
+
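A hedged masked-language-modeling sketch, again assuming the public `microsoft/deberta-base` checkpoint; if the checkpoint does not ship MLM head weights the head is freshly initialized, in which case the predicted token is only illustrative:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFDebertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaForMaskedLM.from_pretrained("microsoft/deberta-base")

inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
logits = model(**inputs).logits

# Locate the [MASK] position and read off the highest-scoring token.
mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))
```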
1368
+ @add_start_docstrings(
1369
+ """
1370
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1371
+ pooled output) e.g. for GLUE tasks.
1372
+ """,
1373
+ DEBERTA_START_DOCSTRING,
1374
+ )
1375
+ class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss):
1376
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1377
+ super().__init__(config, *inputs, **kwargs)
1378
+
1379
+ self.num_labels = config.num_labels
1380
+
1381
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1382
+ self.pooler = TFDebertaContextPooler(config, name="pooler")
1383
+
1384
+ drop_out = getattr(config, "cls_dropout", None)
1385
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
1386
+ self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout")
1387
+ self.classifier = keras.layers.Dense(
1388
+ units=config.num_labels,
1389
+ kernel_initializer=get_initializer(config.initializer_range),
1390
+ name="classifier",
1391
+ )
1392
+ self.output_dim = self.pooler.output_dim
1393
+
1394
+ @unpack_inputs
1395
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1396
+ @add_code_sample_docstrings(
1397
+ checkpoint=_CHECKPOINT_FOR_DOC,
1398
+ output_type=TFSequenceClassifierOutput,
1399
+ config_class=_CONFIG_FOR_DOC,
1400
+ )
1401
+ def call(
1402
+ self,
1403
+ input_ids: TFModelInputType | None = None,
1404
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1405
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1406
+ position_ids: np.ndarray | tf.Tensor | None = None,
1407
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1408
+ output_attentions: Optional[bool] = None,
1409
+ output_hidden_states: Optional[bool] = None,
1410
+ return_dict: Optional[bool] = None,
1411
+ labels: np.ndarray | tf.Tensor | None = None,
1412
+ training: Optional[bool] = False,
1413
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1414
+ r"""
1415
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1416
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1417
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1418
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1419
+ """
1420
+ outputs = self.deberta(
1421
+ input_ids=input_ids,
1422
+ attention_mask=attention_mask,
1423
+ token_type_ids=token_type_ids,
1424
+ position_ids=position_ids,
1425
+ inputs_embeds=inputs_embeds,
1426
+ output_attentions=output_attentions,
1427
+ output_hidden_states=output_hidden_states,
1428
+ return_dict=return_dict,
1429
+ training=training,
1430
+ )
1431
+ sequence_output = outputs[0]
1432
+ pooled_output = self.pooler(sequence_output, training=training)
1433
+ pooled_output = self.dropout(pooled_output, training=training)
1434
+ logits = self.classifier(pooled_output)
1435
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1436
+
1437
+ if not return_dict:
1438
+ output = (logits,) + outputs[1:]
1439
+
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return TFSequenceClassifierOutput(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ hidden_states=outputs.hidden_states,
1446
+ attentions=outputs.attentions,
1447
+ )
1448
+
1449
+ def build(self, input_shape=None):
1450
+ if self.built:
1451
+ return
1452
+ self.built = True
1453
+ if getattr(self, "deberta", None) is not None:
1454
+ with tf.name_scope(self.deberta.name):
1455
+ self.deberta.build(None)
1456
+ if getattr(self, "pooler", None) is not None:
1457
+ with tf.name_scope(self.pooler.name):
1458
+ self.pooler.build(None)
1459
+ if getattr(self, "dropout", None) is not None:
1460
+ with tf.name_scope(self.dropout.name):
1461
+ self.dropout.build(None)
1462
+ if getattr(self, "classifier", None) is not None:
1463
+ with tf.name_scope(self.classifier.name):
1464
+ self.classifier.build([None, None, self.output_dim])
1465
+
1466
+
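A hedged sequence-classification sketch; `num_labels=2` is an illustrative choice and the classification head on top of the assumed `microsoft/deberta-base` checkpoint is randomly initialized, so the logits and loss are only meaningful after fine-tuning:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFDebertaForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaForSequenceClassification.from_pretrained("microsoft/deberta-base", num_labels=2)

inputs = tokenizer("This movie was great!", return_tensors="tf")
outputs = model(**inputs, labels=tf.constant([1]))
print(outputs.logits.shape, outputs.loss)  # (1, 2) and a cross-entropy loss
```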
1467
+ @add_start_docstrings(
1468
+ """
1469
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1470
+ Named-Entity-Recognition (NER) tasks.
1471
+ """,
1472
+ DEBERTA_START_DOCSTRING,
1473
+ )
1474
+ class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss):
1475
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1476
+ super().__init__(config, *inputs, **kwargs)
1477
+
1478
+ self.num_labels = config.num_labels
1479
+
1480
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1481
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1482
+ self.classifier = keras.layers.Dense(
1483
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1484
+ )
1485
+ self.config = config
1486
+
1487
+ @unpack_inputs
1488
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1489
+ @add_code_sample_docstrings(
1490
+ checkpoint=_CHECKPOINT_FOR_DOC,
1491
+ output_type=TFTokenClassifierOutput,
1492
+ config_class=_CONFIG_FOR_DOC,
1493
+ )
1494
+ def call(
1495
+ self,
1496
+ input_ids: TFModelInputType | None = None,
1497
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1498
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1499
+ position_ids: np.ndarray | tf.Tensor | None = None,
1500
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1501
+ output_attentions: Optional[bool] = None,
1502
+ output_hidden_states: Optional[bool] = None,
1503
+ return_dict: Optional[bool] = None,
1504
+ labels: np.ndarray | tf.Tensor | None = None,
1505
+ training: Optional[bool] = False,
1506
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1507
+ r"""
1508
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1509
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1510
+ """
1511
+ outputs = self.deberta(
1512
+ input_ids=input_ids,
1513
+ attention_mask=attention_mask,
1514
+ token_type_ids=token_type_ids,
1515
+ position_ids=position_ids,
1516
+ inputs_embeds=inputs_embeds,
1517
+ output_attentions=output_attentions,
1518
+ output_hidden_states=output_hidden_states,
1519
+ return_dict=return_dict,
1520
+ training=training,
1521
+ )
1522
+ sequence_output = outputs[0]
1523
+ sequence_output = self.dropout(sequence_output, training=training)
1524
+ logits = self.classifier(inputs=sequence_output)
1525
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1526
+
1527
+ if not return_dict:
1528
+ output = (logits,) + outputs[1:]
1529
+ return ((loss,) + output) if loss is not None else output
1530
+
1531
+ return TFTokenClassifierOutput(
1532
+ loss=loss,
1533
+ logits=logits,
1534
+ hidden_states=outputs.hidden_states,
1535
+ attentions=outputs.attentions,
1536
+ )
1537
+
1538
+ def build(self, input_shape=None):
1539
+ if self.built:
1540
+ return
1541
+ self.built = True
1542
+ if getattr(self, "deberta", None) is not None:
1543
+ with tf.name_scope(self.deberta.name):
1544
+ self.deberta.build(None)
1545
+ if getattr(self, "classifier", None) is not None:
1546
+ with tf.name_scope(self.classifier.name):
1547
+ self.classifier.build([None, None, self.config.hidden_size])
1548
+
1549
+
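A hedged token-classification sketch; `num_labels=5` is illustrative and the per-token head over the assumed `microsoft/deberta-base` checkpoint is untrained, so only the output shape is meaningful here:

```python
from transformers import AutoTokenizer, TFDebertaForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaForTokenClassification.from_pretrained("microsoft/deberta-base", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits
print(logits.shape)  # (1, sequence_length, 5)
```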
1550
+ @add_start_docstrings(
1551
+ """
1552
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1553
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1554
+ """,
1555
+ DEBERTA_START_DOCSTRING,
1556
+ )
1557
+ class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss):
1558
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
1559
+ super().__init__(config, *inputs, **kwargs)
1560
+
1561
+ self.num_labels = config.num_labels
1562
+
1563
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
1564
+ self.qa_outputs = keras.layers.Dense(
1565
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1566
+ )
1567
+ self.config = config
1568
+
1569
+ @unpack_inputs
1570
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1571
+ @add_code_sample_docstrings(
1572
+ checkpoint=_CHECKPOINT_FOR_DOC,
1573
+ output_type=TFQuestionAnsweringModelOutput,
1574
+ config_class=_CONFIG_FOR_DOC,
1575
+ )
1576
+ def call(
1577
+ self,
1578
+ input_ids: TFModelInputType | None = None,
1579
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1580
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1581
+ position_ids: np.ndarray | tf.Tensor | None = None,
1582
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ return_dict: Optional[bool] = None,
1586
+ start_positions: np.ndarray | tf.Tensor | None = None,
1587
+ end_positions: np.ndarray | tf.Tensor | None = None,
1588
+ training: Optional[bool] = False,
1589
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1590
+ r"""
1591
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1592
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1593
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1594
+ are not taken into account for computing the loss.
1595
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1596
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1597
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1598
+ are not taken into account for computing the loss.
1599
+ """
1600
+ outputs = self.deberta(
1601
+ input_ids=input_ids,
1602
+ attention_mask=attention_mask,
1603
+ token_type_ids=token_type_ids,
1604
+ position_ids=position_ids,
1605
+ inputs_embeds=inputs_embeds,
1606
+ output_attentions=output_attentions,
1607
+ output_hidden_states=output_hidden_states,
1608
+ return_dict=return_dict,
1609
+ training=training,
1610
+ )
1611
+ sequence_output = outputs[0]
1612
+ logits = self.qa_outputs(inputs=sequence_output)
1613
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1614
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1615
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1616
+ loss = None
1617
+
1618
+ if start_positions is not None and end_positions is not None:
1619
+ labels = {"start_position": start_positions}
1620
+ labels["end_position"] = end_positions
1621
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1622
+
1623
+ if not return_dict:
1624
+ output = (start_logits, end_logits) + outputs[2:]
1625
+ return ((loss,) + output) if loss is not None else output
1626
+
1627
+ return TFQuestionAnsweringModelOutput(
1628
+ loss=loss,
1629
+ start_logits=start_logits,
1630
+ end_logits=end_logits,
1631
+ hidden_states=outputs.hidden_states,
1632
+ attentions=outputs.attentions,
1633
+ )
1634
+
1635
+ def build(self, input_shape=None):
1636
+ if self.built:
1637
+ return
1638
+ self.built = True
1639
+ if getattr(self, "deberta", None) is not None:
1640
+ with tf.name_scope(self.deberta.name):
1641
+ self.deberta.build(None)
1642
+ if getattr(self, "qa_outputs", None) is not None:
1643
+ with tf.name_scope(self.qa_outputs.name):
1644
+ self.qa_outputs.build([None, None, self.config.hidden_size])
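Finally, a hedged question-answering sketch: the span head over the assumed `microsoft/deberta-base` checkpoint is randomly initialized, so the decoded span below is illustrative rather than a real answer; a SQuAD-fine-tuned checkpoint would be needed for meaningful output.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFDebertaForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = TFDebertaForQuestionAnswering.from_pretrained("microsoft/deberta-base")

question = "Who proposed DeBERTa?"
context = "DeBERTa was proposed by Pengcheng He, Xiaodong Liu, Jianfeng Gao and Weizhu Chen."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# Pick the most likely start/end indices and decode the span between them.
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```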
venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py ADDED
@@ -0,0 +1,393 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model DeBERTa."""
16
+
17
+ import json
18
+ import os
19
+ from typing import List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
30
+
31
+
32
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
33
+ def bytes_to_unicode():
34
+ """
35
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
36
+ characters that the bpe code barfs on.
37
+
38
+ The reversible bpe codes work on unicode strings. This means you need a large number of unicode characters in your vocab
39
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
40
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
41
+ tables between utf-8 bytes and unicode strings.
42
+ """
43
+ bs = (
44
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
45
+ )
46
+ cs = bs[:]
47
+ n = 0
48
+ for b in range(2**8):
49
+ if b not in bs:
50
+ bs.append(b)
51
+ cs.append(2**8 + n)
52
+ n += 1
53
+ cs = [chr(n) for n in cs]
54
+ return dict(zip(bs, cs))
55
+
56
+
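A tiny sketch of what `bytes_to_unicode` produces: every one of the 256 byte values gets a printable unicode character, so bytes that would otherwise be whitespace (like the space byte) are remapped to visible symbols such as `Ġ`:

```python
from transformers.models.deberta.tokenization_deberta import bytes_to_unicode

byte_encoder = bytes_to_unicode()
print(len(byte_encoder))       # 256, one printable character per byte value
print(byte_encoder[ord(" ")])  # 'Ġ' -- the space byte maps to a visible symbol
print(byte_encoder[ord("A")])  # 'A' -- printable ASCII maps to itself
```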
57
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
58
+ def get_pairs(word):
59
+ """
60
+ Return set of symbol pairs in a word.
61
+
62
+ Word is represented as tuple of symbols (symbols being variable-length strings).
63
+ """
64
+ pairs = set()
65
+ prev_char = word[0]
66
+ for char in word[1:]:
67
+ pairs.add((prev_char, char))
68
+ prev_char = char
69
+ return pairs
70
+
71
+
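And a one-liner showing what `get_pairs` feeds into the BPE merge loop: the set of adjacent symbol pairs of a word (as a set, print order may vary):

```python
from transformers.models.deberta.tokenization_deberta import get_pairs

print(get_pairs(tuple("hello")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
```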
72
+ class DebertaTokenizer(PreTrainedTokenizer):
73
+ """
74
+ Construct a DeBERTa tokenizer. Based on byte-level Byte-Pair-Encoding.
75
+
76
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
77
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
78
+
79
+ ```python
80
+ >>> from transformers import DebertaTokenizer
81
+
82
+ >>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
83
+ >>> tokenizer("Hello world")["input_ids"]
84
+ [1, 31414, 232, 2]
85
+
86
+ >>> tokenizer(" Hello world")["input_ids"]
87
+ [1, 20920, 232, 2]
88
+ ```
89
+
90
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
91
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
92
+
93
+ <Tip>
94
+
95
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
96
+
97
+ </Tip>
98
+
99
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
100
+ this superclass for more information regarding those methods.
101
+
102
+ Args:
103
+ vocab_file (`str`):
104
+ Path to the vocabulary file.
105
+ merges_file (`str`):
106
+ Path to the merges file.
107
+ errors (`str`, *optional*, defaults to `"replace"`):
108
+ Paradigm to follow when decoding bytes to UTF-8. See
109
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
110
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
111
+ The beginning of sequence token.
112
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
113
+ The end of sequence token.
114
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
115
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
116
+ sequence classification or for a text and a question for question answering. It is also used as the last
117
+ token of a sequence built with special tokens.
118
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
119
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
120
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
121
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
122
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
123
+ token instead.
124
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
125
+ The token used for padding, for example when batching sequences of different lengths.
126
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
127
+ The token used for masking values. This is the token used when training this model with masked language
128
+ modeling. This is the token which the model will try to predict.
129
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
130
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
131
+ other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space.)
132
+ add_bos_token (`bool`, *optional*, defaults to `False`):
133
+ Whether or not to add an initial `bos_token` to the input. This allows treating the leading word just as
134
+ any other word.
135
+ """
136
+
137
+ vocab_files_names = VOCAB_FILES_NAMES
138
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
139
+
140
+ def __init__(
141
+ self,
142
+ vocab_file,
143
+ merges_file,
144
+ errors="replace",
145
+ bos_token="[CLS]",
146
+ eos_token="[SEP]",
147
+ sep_token="[SEP]",
148
+ cls_token="[CLS]",
149
+ unk_token="[UNK]",
150
+ pad_token="[PAD]",
151
+ mask_token="[MASK]",
152
+ add_prefix_space=False,
153
+ add_bos_token=False,
154
+ **kwargs,
155
+ ):
156
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
157
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
158
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
159
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
160
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
161
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
162
+
163
+ # Mask token behave like a normal word, i.e. include the space before it
164
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
165
+ self.add_bos_token = add_bos_token
166
+
167
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
168
+ self.encoder = json.load(vocab_handle)
169
+ self.decoder = {v: k for k, v in self.encoder.items()}
170
+ self.errors = errors # how to handle errors in decoding
171
+ self.byte_encoder = bytes_to_unicode()
172
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
173
+ with open(merges_file, encoding="utf-8") as merges_handle:
174
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
175
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
176
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
177
+ self.cache = {}
178
+ self.add_prefix_space = add_prefix_space
179
+
180
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
181
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
182
+
183
+ super().__init__(
184
+ errors=errors,
185
+ bos_token=bos_token,
186
+ eos_token=eos_token,
187
+ unk_token=unk_token,
188
+ sep_token=sep_token,
189
+ cls_token=cls_token,
190
+ pad_token=pad_token,
191
+ mask_token=mask_token,
192
+ add_prefix_space=add_prefix_space,
193
+ add_bos_token=add_bos_token,
194
+ **kwargs,
195
+ )
196
+
197
+ @property
198
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.vocab_size
199
+ def vocab_size(self):
200
+ return len(self.encoder)
201
+
202
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
203
+ def get_vocab(self):
204
+ return dict(self.encoder, **self.added_tokens_encoder)
205
+
206
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
207
+ def bpe(self, token):
208
+ if token in self.cache:
209
+ return self.cache[token]
210
+ word = tuple(token)
211
+ pairs = get_pairs(word)
212
+
213
+ if not pairs:
214
+ return token
215
+
216
+ while True:
217
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
218
+ if bigram not in self.bpe_ranks:
219
+ break
220
+ first, second = bigram
221
+ new_word = []
222
+ i = 0
223
+ while i < len(word):
224
+ try:
225
+ j = word.index(first, i)
226
+ except ValueError:
227
+ new_word.extend(word[i:])
228
+ break
229
+ else:
230
+ new_word.extend(word[i:j])
231
+ i = j
232
+
233
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
234
+ new_word.append(first + second)
235
+ i += 2
236
+ else:
237
+ new_word.append(word[i])
238
+ i += 1
239
+ new_word = tuple(new_word)
240
+ word = new_word
241
+ if len(word) == 1:
242
+ break
243
+ else:
244
+ pairs = get_pairs(word)
245
+ word = " ".join(word)
246
+ self.cache[token] = word
247
+ return word
248
+
249
+ def build_inputs_with_special_tokens(
250
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
251
+ ) -> List[int]:
252
+ """
253
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
254
+ adding special tokens. A DeBERTa sequence has the following format:
255
+
256
+ - single sequence: [CLS] X [SEP]
257
+ - pair of sequences: [CLS] A [SEP] B [SEP]
258
+
259
+ Args:
260
+ token_ids_0 (`List[int]`):
261
+ List of IDs to which the special tokens will be added.
262
+ token_ids_1 (`List[int]`, *optional*):
263
+ Optional second list of IDs for sequence pairs.
264
+
265
+ Returns:
266
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
267
+ """
268
+ if token_ids_1 is None:
269
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
270
+ cls = [self.cls_token_id]
271
+ sep = [self.sep_token_id]
272
+ return cls + token_ids_0 + sep + token_ids_1 + sep
273
+
274
+ def get_special_tokens_mask(
275
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
276
+ ) -> List[int]:
277
+ """
278
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
279
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
280
+
281
+ Args:
282
+ token_ids_0 (`List[int]`):
283
+ List of IDs.
284
+ token_ids_1 (`List[int]`, *optional*):
285
+ Optional second list of IDs for sequence pairs.
286
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
287
+ Whether or not the token list is already formatted with special tokens for the model.
288
+
289
+ Returns:
290
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
291
+ """
292
+ if already_has_special_tokens:
293
+ return super().get_special_tokens_mask(
294
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
295
+ )
296
+
297
+ if token_ids_1 is None:
298
+ return [1] + ([0] * len(token_ids_0)) + [1]
299
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
300
+
301
+ def create_token_type_ids_from_sequences(
302
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
303
+ ) -> List[int]:
304
+ """
305
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
306
+ sequence pair mask has the following format:
307
+
308
+ ```
309
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
310
+ | first sequence | second sequence |
311
+ ```
312
+
313
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
314
+
315
+ Args:
316
+ token_ids_0 (`List[int]`):
317
+ List of IDs.
318
+ token_ids_1 (`List[int]`, *optional*):
319
+ Optional second list of IDs for sequence pairs.
320
+
321
+ Returns:
322
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
323
+ """
324
+ sep = [self.sep_token_id]
325
+ cls = [self.cls_token_id]
326
+
327
+ if token_ids_1 is None:
328
+ return len(cls + token_ids_0 + sep) * [0]
329
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
330
+
331
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
332
+ def _tokenize(self, text):
333
+ """Tokenize a string."""
334
+ bpe_tokens = []
335
+ for token in re.findall(self.pat, text):
336
+ token = "".join(
337
+ self.byte_encoder[b] for b in token.encode("utf-8")
338
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
339
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
340
+ return bpe_tokens
341
+
342
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
343
+ def _convert_token_to_id(self, token):
344
+ """Converts a token (str) in an id using the vocab."""
345
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
346
+
347
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
348
+ def _convert_id_to_token(self, index):
349
+ """Converts an index (integer) in a token (str) using the vocab."""
350
+ return self.decoder.get(index)
351
+
352
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
353
+ def convert_tokens_to_string(self, tokens):
354
+ """Converts a sequence of tokens (string) in a single string."""
355
+ text = "".join(tokens)
356
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
357
+ return text
358
+
359
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
360
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
361
+ if not os.path.isdir(save_directory):
362
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
363
+ return
364
+ vocab_file = os.path.join(
365
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
366
+ )
367
+ merge_file = os.path.join(
368
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
369
+ )
370
+
371
+ with open(vocab_file, "w", encoding="utf-8") as f:
372
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
373
+
374
+ index = 0
375
+ with open(merge_file, "w", encoding="utf-8") as writer:
376
+ writer.write("#version: 0.2\n")
377
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
378
+ if index != token_index:
379
+ logger.warning(
380
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
381
+ " Please check that the tokenizer is not corrupted!"
382
+ )
383
+ index = token_index
384
+ writer.write(" ".join(bpe_tokens) + "\n")
385
+ index += 1
386
+
387
+ return vocab_file, merge_file
388
+
389
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
390
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
391
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
392
+ text = " " + text
393
+ return (text, kwargs)
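As a quick, hedged usage sketch for the slow tokenizer added above (it assumes the `microsoft/deberta-base` checkpoint referenced elsewhere in this commit is reachable; the helper calls mirror the methods defined in this file):

```python
# Sketch only: exercises the special-token helpers defined in tokenization_deberta.py.
from transformers import DebertaTokenizer

tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")

# Single sequence -> [CLS] X [SEP]
single = tok("Hello world")["input_ids"]
print(tok.convert_ids_to_tokens(single))  # ['[CLS]', ..., '[SEP]']

# Sequence pair -> [CLS] A [SEP] B [SEP]
ids_a = tok.encode("Hello world", add_special_tokens=False)
ids_b = tok.encode("How are you?", add_special_tokens=False)
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
mask = tok.get_special_tokens_mask(ids_a, ids_b)                 # 1 marks the special tokens
types = tok.create_token_type_ids_from_sequences(ids_a, ids_b)   # 0s for A, 1s for B
assert len(pair) == len(mask) == len(types)
```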
venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py ADDED
@@ -0,0 +1,247 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Fast Tokenization class for model DeBERTa."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import pre_tokenizers
21
+
22
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import logging
25
+ from .tokenization_deberta import DebertaTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+
33
+ class DebertaTokenizerFast(PreTrainedTokenizerFast):
34
+ """
35
+ Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
36
+ Byte-Pair-Encoding.
37
+
38
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
39
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
40
+
41
+ ```python
42
+ >>> from transformers import DebertaTokenizerFast
43
+
44
+ >>> tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
45
+ >>> tokenizer("Hello world")["input_ids"]
46
+ [1, 31414, 232, 2]
47
+
48
+ >>> tokenizer(" Hello world")["input_ids"]
49
+ [1, 20920, 232, 2]
50
+ ```
51
+
52
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
53
+ the model was not pretrained this way, it might yield a decrease in performance.
54
+
55
+ <Tip>
56
+
57
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
58
+
59
+ </Tip>
60
+
61
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
62
+ refer to this superclass for more information regarding those methods.
63
+
64
+ Args:
65
+ vocab_file (`str`, *optional*):
66
+ Path to the vocabulary file.
67
+ merges_file (`str`, *optional*):
68
+ Path to the merges file.
69
+ tokenizer_file (`str`, *optional*):
70
+ The path to a tokenizer file to use instead of the vocab file.
71
+ errors (`str`, *optional*, defaults to `"replace"`):
72
+ Paradigm to follow when decoding bytes to UTF-8. See
73
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
74
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
75
+ The beginning of sequence token.
76
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ The end of sequence token.
78
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
79
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
80
+ sequence classification or for a text and a question for question answering. It is also used as the last
81
+ token of a sequence built with special tokens.
82
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
83
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
84
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
85
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
86
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
87
+ token instead.
88
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
89
+ The token used for padding, for example when batching sequences of different lengths.
90
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
91
+ The token used for masking values. This is the token used when training this model with masked language
92
+ modeling. This is the token which the model will try to predict.
93
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
94
+ Whether or not to add an initial space to the input. This allows treating the leading word just as any
95
+ other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space.)
96
+ """
97
+
98
+ vocab_files_names = VOCAB_FILES_NAMES
99
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
100
+ slow_tokenizer_class = DebertaTokenizer
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_file=None,
105
+ merges_file=None,
106
+ tokenizer_file=None,
107
+ errors="replace",
108
+ bos_token="[CLS]",
109
+ eos_token="[SEP]",
110
+ sep_token="[SEP]",
111
+ cls_token="[CLS]",
112
+ unk_token="[UNK]",
113
+ pad_token="[PAD]",
114
+ mask_token="[MASK]",
115
+ add_prefix_space=False,
116
+ **kwargs,
117
+ ):
118
+ super().__init__(
119
+ vocab_file,
120
+ merges_file,
121
+ tokenizer_file=tokenizer_file,
122
+ errors=errors,
123
+ bos_token=bos_token,
124
+ eos_token=eos_token,
125
+ unk_token=unk_token,
126
+ sep_token=sep_token,
127
+ cls_token=cls_token,
128
+ pad_token=pad_token,
129
+ mask_token=mask_token,
130
+ add_prefix_space=add_prefix_space,
131
+ **kwargs,
132
+ )
133
+ self.add_bos_token = kwargs.pop("add_bos_token", False)
134
+
135
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
136
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
137
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
138
+ pre_tok_state["add_prefix_space"] = add_prefix_space
139
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
140
+
141
+ self.add_prefix_space = add_prefix_space
142
+
143
+ @property
144
+ def mask_token(self) -> str:
145
+ """
146
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
147
+ having been set.
148
+
149
+ Deberta tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will greedily
150
+ comprise the space before the *[MASK]*.
151
+ """
152
+ if self._mask_token is None:
153
+ if self.verbose:
154
+ logger.error("Using mask_token, but it is not set yet.")
155
+ return None
156
+ return str(self._mask_token)
157
+
158
+ @mask_token.setter
159
+ def mask_token(self, value):
160
+ """
161
+ Overriding the default behavior of the mask token to have it eat the space before it.
162
+ """
163
+ # Mask token behaves like a normal word, i.e. includes the space before it
164
+ # So we set lstrip to True
165
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
166
+ self._mask_token = value
167
+
168
+ def build_inputs_with_special_tokens(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
170
+ ) -> List[int]:
171
+ """
172
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
173
+ adding special tokens. A DeBERTa sequence has the following format:
174
+
175
+ - single sequence: [CLS] X [SEP]
176
+ - pair of sequences: [CLS] A [SEP] B [SEP]
177
+
178
+ Args:
179
+ token_ids_0 (`List[int]`):
180
+ List of IDs to which the special tokens will be added.
181
+ token_ids_1 (`List[int]`, *optional*):
182
+ Optional second list of IDs for sequence pairs.
183
+
184
+ Returns:
185
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
186
+ """
187
+ if token_ids_1 is None:
188
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
189
+ cls = [self.cls_token_id]
190
+ sep = [self.sep_token_id]
191
+ return cls + token_ids_0 + sep + token_ids_1 + sep
192
+
193
+ def create_token_type_ids_from_sequences(
194
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
195
+ ) -> List[int]:
196
+ """
197
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
198
+ sequence pair mask has the following format:
199
+
200
+ ```
201
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
202
+ | first sequence | second sequence |
203
+ ```
204
+
205
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
206
+
207
+ Args:
208
+ token_ids_0 (`List[int]`):
209
+ List of IDs.
210
+ token_ids_1 (`List[int]`, *optional*):
211
+ Optional second list of IDs for sequence pairs.
212
+
213
+ Returns:
214
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
215
+ """
216
+ sep = [self.sep_token_id]
217
+ cls = [self.cls_token_id]
218
+
219
+ if token_ids_1 is None:
220
+ return len(cls + token_ids_0 + sep) * [0]
221
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
222
+
223
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._batch_encode_plus
224
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
225
+ is_split_into_words = kwargs.get("is_split_into_words", False)
226
+ assert self.add_prefix_space or not is_split_into_words, (
227
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
228
+ "to use it with pretokenized inputs."
229
+ )
230
+
231
+ return super()._batch_encode_plus(*args, **kwargs)
232
+
233
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._encode_plus
234
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
235
+ is_split_into_words = kwargs.get("is_split_into_words", False)
236
+
237
+ assert self.add_prefix_space or not is_split_into_words, (
238
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
239
+ "to use it with pretokenized inputs."
240
+ )
241
+
242
+ return super()._encode_plus(*args, **kwargs)
243
+
244
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
245
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
246
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
247
+ return tuple(files)
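For completeness, a minimal sketch of the fast tokenizer above, showing the `add_prefix_space` requirement enforced by `_encode_plus`/`_batch_encode_plus` (again assuming the `microsoft/deberta-base` checkpoint from the class docstring is available):

```python
# Sketch only: pretokenized input requires add_prefix_space=True, as asserted above.
from transformers import DebertaTokenizerFast

fast = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base", add_prefix_space=True)

# Pretokenized (already split into words) input is allowed because add_prefix_space=True
enc = fast(["Hello", "world"], is_split_into_words=True)
print(enc.tokens())  # word pieces including [CLS]/[SEP]

# The mask_token setter makes "[MASK]" absorb the space before it (lstrip=True)
print(fast.mask_token)  # "[MASK]"
```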
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.25 kB).
 
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc ADDED
Binary file (11.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc ADDED
Binary file (67.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/encodec/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_encodec": [
25
+ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
26
+ "EncodecConfig",
27
+ ],
28
+ "feature_extraction_encodec": ["EncodecFeatureExtractor"],
29
+ }
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_encodec"] = [
38
+ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "EncodecModel",
40
+ "EncodecPreTrainedModel",
41
+ ]
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_encodec import (
45
+ ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
46
+ EncodecConfig,
47
+ )
48
+ from .feature_extraction_encodec import EncodecFeatureExtractor
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_encodec import (
57
+ ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ EncodecModel,
59
+ EncodecPreTrainedModel,
60
+ )
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
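A short sketch of what the lazy-import structure above provides: the config and feature-extractor classes resolve without pulling in the torch-only modeling code, which is only registered when torch is available and only resolved on first access.

```python
# Sketch only: relies on the _LazyModule wiring defined in this __init__.py.
from transformers.models.encodec import EncodecConfig, EncodecFeatureExtractor

config = EncodecConfig()        # resolved from configuration_encodec
fe = EncodecFeatureExtractor()  # resolved from feature_extraction_encodec

# The modeling class is resolved lazily on first access and requires torch.
from transformers.models.encodec import EncodecModel  # noqa: F401
```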
venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (986 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc ADDED
Binary file (7.44 kB).
 
venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc ADDED
Binary file (8.01 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py ADDED
@@ -0,0 +1,193 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ EnCodec model configuration"""
16
+
17
+
18
+ import math
19
+ from typing import Optional
20
+
21
+ import numpy as np
22
+
23
+ from ...configuration_utils import PretrainedConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class EncodecConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate a
36
+ Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
37
+ with the defaults will yield a similar configuration to that of the
38
+ [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ target_bandwidths (`List[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`):
45
+ The range of different bandwidths the model can encode audio with.
46
+ sampling_rate (`int`, *optional*, defaults to 24000):
47
+ The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
48
+ audio_channels (`int`, *optional*, defaults to 1):
49
+ Number of channels in the audio data. Either 1 for mono or 2 for stereo.
50
+ normalize (`bool`, *optional*, defaults to `False`):
51
+ Whether the audio shall be normalized when passed.
52
+ chunk_length_s (`float`, *optional*):
53
+ If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
54
+ overlap (`float`, *optional*):
55
+ Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
56
+ formula: `int((1.0 - self.overlap) * self.chunk_length)`.
57
+ hidden_size (`int`, *optional*, defaults to 128):
58
+ Intermediate representation dimension.
59
+ num_filters (`int`, *optional*, defaults to 32):
60
+ Number of convolution kernels in the first `EncodecConv1d` downsampling layer.
61
+ num_residual_layers (`int`, *optional*, defaults to 1):
62
+ Number of residual layers.
63
+ upsampling_ratios (`Sequence[int]` , *optional*, defaults to `[8, 5, 4, 2]`):
64
+ Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it
65
+ will use the ratios in the reverse order to the ones specified here that must match the decoder order.
66
+ norm_type (`str`, *optional*, defaults to `"weight_norm"`):
67
+ Normalization method. Should be in `["weight_norm", "time_group_norm"]`
68
+ kernel_size (`int`, *optional*, defaults to 7):
69
+ Kernel size for the initial convolution.
70
+ last_kernel_size (`int`, *optional*, defaults to 7):
71
+ Kernel size for the last convolution layer.
72
+ residual_kernel_size (`int`, *optional*, defaults to 3):
73
+ Kernel size for the residual layers.
74
+ dilation_growth_rate (`int`, *optional*, defaults to 2):
75
+ How much to increase the dilation with each layer.
76
+ use_causal_conv (`bool`, *optional*, defaults to `True`):
77
+ Whether to use fully causal convolution.
78
+ pad_mode (`str`, *optional*, defaults to `"reflect"`):
79
+ Padding mode for the convolutions.
80
+ compress (`int`, *optional*, defaults to 2):
81
+ Reduced dimensionality in residual branches (from Demucs v3).
82
+ num_lstm_layers (`int`, *optional*, defaults to 2):
83
+ Number of LSTM layers at the end of the encoder.
84
+ trim_right_ratio (`float`, *optional*, defaults to 1.0):
85
+ Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
86
+ equal to 1.0, it means that all the trimming is done at the right.
87
+ codebook_size (`int`, *optional*, defaults to 1024):
88
+ Number of discrete codes that make up the VQVAE.
89
+ codebook_dim (`int`, *optional*):
90
+ Dimension of the codebook vectors. If not defined, uses `hidden_size`.
91
+ use_conv_shortcut (`bool`, *optional*, defaults to `True`):
92
+ Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False,
93
+ an identity function will be used, giving a generic residual connection.
94
+
95
+ Example:
96
+
97
+ ```python
98
+ >>> from transformers import EncodecModel, EncodecConfig
99
+
100
+ >>> # Initializing a "facebook/encodec_24khz" style configuration
101
+ >>> configuration = EncodecConfig()
102
+
103
+ >>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration
104
+ >>> model = EncodecModel(configuration)
105
+
106
+ >>> # Accessing the model configuration
107
+ >>> configuration = model.config
108
+ ```"""
109
+
110
+ model_type = "encodec"
111
+
112
+ def __init__(
113
+ self,
114
+ target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
115
+ sampling_rate=24_000,
116
+ audio_channels=1,
117
+ normalize=False,
118
+ chunk_length_s=None,
119
+ overlap=None,
120
+ hidden_size=128,
121
+ num_filters=32,
122
+ num_residual_layers=1,
123
+ upsampling_ratios=[8, 5, 4, 2],
124
+ norm_type="weight_norm",
125
+ kernel_size=7,
126
+ last_kernel_size=7,
127
+ residual_kernel_size=3,
128
+ dilation_growth_rate=2,
129
+ use_causal_conv=True,
130
+ pad_mode="reflect",
131
+ compress=2,
132
+ num_lstm_layers=2,
133
+ trim_right_ratio=1.0,
134
+ codebook_size=1024,
135
+ codebook_dim=None,
136
+ use_conv_shortcut=True,
137
+ **kwargs,
138
+ ):
139
+ self.target_bandwidths = target_bandwidths
140
+ self.sampling_rate = sampling_rate
141
+ self.audio_channels = audio_channels
142
+ self.normalize = normalize
143
+ self.chunk_length_s = chunk_length_s
144
+ self.overlap = overlap
145
+ self.hidden_size = hidden_size
146
+ self.num_filters = num_filters
147
+ self.num_residual_layers = num_residual_layers
148
+ self.upsampling_ratios = upsampling_ratios
149
+ self.norm_type = norm_type
150
+ self.kernel_size = kernel_size
151
+ self.last_kernel_size = last_kernel_size
152
+ self.residual_kernel_size = residual_kernel_size
153
+ self.dilation_growth_rate = dilation_growth_rate
154
+ self.use_causal_conv = use_causal_conv
155
+ self.pad_mode = pad_mode
156
+ self.compress = compress
157
+ self.num_lstm_layers = num_lstm_layers
158
+ self.trim_right_ratio = trim_right_ratio
159
+ self.codebook_size = codebook_size
160
+ self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
161
+ self.use_conv_shortcut = use_conv_shortcut
162
+
163
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
164
+ raise ValueError(
165
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
166
+ )
167
+
168
+ super().__init__(**kwargs)
169
+
170
+ # This is a property because you might want to change the chunk_length_s on the fly
171
+ @property
172
+ def chunk_length(self) -> Optional[int]:
173
+ if self.chunk_length_s is None:
174
+ return None
175
+ else:
176
+ return int(self.chunk_length_s * self.sampling_rate)
177
+
178
+ # This is a property because you might want to change the chunk_length_s on the fly
179
+ @property
180
+ def chunk_stride(self) -> Optional[int]:
181
+ if self.chunk_length_s is None or self.overlap is None:
182
+ return None
183
+ else:
184
+ return max(1, int((1.0 - self.overlap) * self.chunk_length))
185
+
186
+ @property
187
+ def frame_rate(self) -> int:
188
+ hop_length = np.prod(self.upsampling_ratios)
189
+ return math.ceil(self.sampling_rate / hop_length)
190
+
191
+ @property
192
+ def num_quantizers(self) -> int:
193
+ return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
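To make the derived properties concrete, here is a worked sketch with the default 24 kHz values plus an explicitly chosen chunking setup (the `chunk_length_s`/`overlap` values are illustrative, not shipped defaults):

```python
# Sketch only: reproduces chunk_length, chunk_stride, frame_rate and num_quantizers by hand.
import math

import numpy as np

from transformers import EncodecConfig

config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)

hop_length = np.prod(config.upsampling_ratios)                     # 8 * 5 * 4 * 2 = 320
assert config.frame_rate == math.ceil(24_000 / hop_length)         # 75 frames per second
assert config.chunk_length == int(1.0 * 24_000)                    # 24_000 samples per chunk
assert config.chunk_stride == max(1, int((1.0 - 0.01) * 24_000))   # 23_760 samples
assert config.num_quantizers == int(1000 * 24.0 // (75 * 10))      # 32 quantizers
```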
venv/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,365 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert EnCodec checkpoints."""
16
+
17
+ import argparse
18
+
19
+ import torch
20
+
21
+ from transformers import (
22
+ EncodecConfig,
23
+ EncodecFeatureExtractor,
24
+ EncodecModel,
25
+ logging,
26
+ )
27
+
28
+
29
+ # checkpoints downloaded from:
30
+ # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
31
+ # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
32
+ # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
33
+
34
+
35
+ logging.set_verbosity_info()
36
+ logger = logging.get_logger("transformers.models.encodec")
37
+
38
+ MAPPING_QUANTIZER = {
39
+ "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
40
+ "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
41
+ "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
42
+ "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
43
+ }
44
+ MAPPING_ENCODER = {
45
+ "encoder.model.0.conv.conv": "encoder.layers.0.conv",
46
+ "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
47
+ "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
48
+ "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
49
+ "encoder.model.3.conv.conv": "encoder.layers.3.conv",
50
+ "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
51
+ "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
52
+ "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
53
+ "encoder.model.6.conv.conv": "encoder.layers.6.conv",
54
+ "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
55
+ "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
56
+ "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
57
+ "encoder.model.9.conv.conv": "encoder.layers.9.conv",
58
+ "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
59
+ "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
60
+ "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
61
+ "encoder.model.12.conv.conv": "encoder.layers.12.conv",
62
+ "encoder.model.13.lstm": "encoder.layers.13.lstm",
63
+ "encoder.model.15.conv.conv": "encoder.layers.15.conv",
64
+ }
65
+ MAPPING_ENCODER_48K = {
66
+ "encoder.model.0.conv.norm": "encoder.layers.0.norm",
67
+ "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
68
+ "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
69
+ "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
70
+ "encoder.model.3.conv.norm": "encoder.layers.3.norm",
71
+ "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
72
+ "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
73
+ "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
74
+ "encoder.model.6.conv.norm": "encoder.layers.6.norm",
75
+ "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
76
+ "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
77
+ "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
78
+ "encoder.model.9.conv.norm": "encoder.layers.9.norm",
79
+ "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
80
+ "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
81
+ "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
82
+ "encoder.model.12.conv.norm": "encoder.layers.12.norm",
83
+ "encoder.model.15.conv.norm": "encoder.layers.15.norm",
84
+ }
85
+ MAPPING_DECODER = {
86
+ "decoder.model.0.conv.conv": "decoder.layers.0.conv",
87
+ "decoder.model.1.lstm": "decoder.layers.1.lstm",
88
+ "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
89
+ "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
90
+ "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
91
+ "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
92
+ "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
93
+ "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
94
+ "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
95
+ "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
96
+ "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
97
+ "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
98
+ "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
99
+ "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
100
+ "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
101
+ "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
102
+ "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
103
+ "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
104
+ "decoder.model.15.conv.conv": "decoder.layers.15.conv",
105
+ }
106
+ MAPPING_DECODER_48K = {
107
+ "decoder.model.0.conv.norm": "decoder.layers.0.norm",
108
+ "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
109
+ "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
110
+ "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
111
+ "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
112
+ "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
113
+ "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
114
+ "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
115
+ "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
116
+ "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
117
+ "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
118
+ "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
119
+ "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
120
+ "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
121
+ "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
122
+ "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
123
+ "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
124
+ "decoder.model.15.conv.norm": "decoder.layers.15.norm",
125
+ }
126
+ MAPPING_24K = {
127
+ **MAPPING_QUANTIZER,
128
+ **MAPPING_ENCODER,
129
+ **MAPPING_DECODER,
130
+ }
131
+ MAPPING_48K = {
132
+ **MAPPING_QUANTIZER,
133
+ **MAPPING_ENCODER,
134
+ **MAPPING_ENCODER_48K,
135
+ **MAPPING_DECODER,
136
+ **MAPPING_DECODER_48K,
137
+ }
138
+ TOP_LEVEL_KEYS = []
139
+ IGNORE_KEYS = []
140
+
141
+
142
+ def set_recursively(hf_pointer, key, value, full_name, weight_type):
143
+ for attribute in key.split("."):
144
+ hf_pointer = getattr(hf_pointer, attribute)
145
+
146
+ if weight_type is not None:
147
+ hf_shape = getattr(hf_pointer, weight_type).shape
148
+ else:
149
+ hf_shape = hf_pointer.shape
150
+
151
+ if hf_shape != value.shape:
152
+ raise ValueError(
153
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
154
+ f" {value.shape} for {full_name}"
155
+ )
156
+
157
+ if weight_type == "weight":
158
+ hf_pointer.weight.data = value
159
+ elif weight_type == "weight_g":
160
+ hf_pointer.weight_g.data = value
161
+ elif weight_type == "weight_v":
162
+ hf_pointer.weight_v.data = value
163
+ elif weight_type == "bias":
164
+ hf_pointer.bias.data = value
165
+ elif weight_type == "running_mean":
166
+ hf_pointer.running_mean.data = value
167
+ elif weight_type == "running_var":
168
+ hf_pointer.running_var.data = value
169
+ elif weight_type == "num_batches_tracked":
170
+ hf_pointer.num_batches_tracked.data = value
171
+ elif weight_type == "weight_ih_l0":
172
+ hf_pointer.weight_ih_l0.data = value
173
+ elif weight_type == "weight_hh_l0":
174
+ hf_pointer.weight_hh_l0.data = value
175
+ elif weight_type == "bias_ih_l0":
176
+ hf_pointer.bias_ih_l0.data = value
177
+ elif weight_type == "bias_hh_l0":
178
+ hf_pointer.bias_hh_l0.data = value
179
+ elif weight_type == "weight_ih_l1":
180
+ hf_pointer.weight_ih_l1.data = value
181
+ elif weight_type == "weight_hh_l1":
182
+ hf_pointer.weight_hh_l1.data = value
183
+ elif weight_type == "bias_ih_l1":
184
+ hf_pointer.bias_ih_l1.data = value
185
+ elif weight_type == "bias_hh_l1":
186
+ hf_pointer.bias_hh_l1.data = value
187
+ else:
188
+ hf_pointer.data = value
189
+
190
+ logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
191
+
192
+
193
+ def should_ignore(name, ignore_keys):
194
+ for key in ignore_keys:
195
+ if key.endswith(".*"):
196
+ if name.startswith(key[:-1]):
197
+ return True
198
+ elif ".*." in key:
199
+ prefix, suffix = key.split(".*.")
200
+ if prefix in name and suffix in name:
201
+ return True
202
+ elif key in name:
203
+ return True
204
+ return False
205
+
206
+
207
+ def recursively_load_weights(orig_dict, hf_model, model_name):
208
+ unused_weights = []
209
+
210
+ if model_name == "encodec_24khz" or "encodec_32khz":
211
+ MAPPING = MAPPING_24K
212
+ elif model_name == "encodec_48khz":
213
+ MAPPING = MAPPING_48K
214
+ else:
215
+ raise ValueError(f"Unsupported model: {model_name}")
216
+
217
+ for name, value in orig_dict.items():
218
+ if should_ignore(name, IGNORE_KEYS):
219
+ logger.info(f"{name} was ignored")
220
+ continue
221
+
222
+ is_used = False
223
+ for key, mapped_key in MAPPING.items():
224
+ if "*" in key:
225
+ prefix, suffix = key.split(".*.")
226
+ if prefix in name and suffix in name:
227
+ key = suffix
228
+
229
+ if key in name:
230
+ # HACK otherwise .embed gets initialized with .embed_avg too
231
+ if key.endswith("embed") and name.endswith("embed_avg"):
232
+ continue
233
+
234
+ is_used = True
235
+ if "*" in mapped_key:
236
+ layer_index = name.split(key)[0].split(".")[-2]
237
+ mapped_key = mapped_key.replace("*", layer_index)
238
+ if "weight_g" in name:
239
+ weight_type = "weight_g"
240
+ elif "weight_v" in name:
241
+ weight_type = "weight_v"
242
+ elif "weight_ih_l0" in name:
243
+ weight_type = "weight_ih_l0"
244
+ elif "weight_hh_l0" in name:
245
+ weight_type = "weight_hh_l0"
246
+ elif "bias_ih_l0" in name:
247
+ weight_type = "bias_ih_l0"
248
+ elif "bias_hh_l0" in name:
249
+ weight_type = "bias_hh_l0"
250
+ elif "weight_ih_l1" in name:
251
+ weight_type = "weight_ih_l1"
252
+ elif "weight_hh_l1" in name:
253
+ weight_type = "weight_hh_l1"
254
+ elif "bias_ih_l1" in name:
255
+ weight_type = "bias_ih_l1"
256
+ elif "bias_hh_l1" in name:
257
+ weight_type = "bias_hh_l1"
258
+ elif "bias" in name:
259
+ weight_type = "bias"
260
+ elif "weight" in name:
261
+ weight_type = "weight"
262
+ elif "running_mean" in name:
263
+ weight_type = "running_mean"
264
+ elif "running_var" in name:
265
+ weight_type = "running_var"
266
+ elif "num_batches_tracked" in name:
267
+ weight_type = "num_batches_tracked"
268
+ else:
269
+ weight_type = None
270
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
271
+ continue
272
+ if not is_used:
273
+ unused_weights.append(name)
274
+
275
+ logger.warning(f"Unused weights: {unused_weights}")
276
+
277
+
278
+ @torch.no_grad()
279
+ def convert_checkpoint(
280
+ model_name,
281
+ checkpoint_path,
282
+ pytorch_dump_folder_path,
283
+ config_path=None,
284
+ repo_id=None,
285
+ ):
286
+ """
287
+ Copy/paste/tweak model's weights to transformers design.
288
+ """
289
+ if config_path is not None:
290
+ config = EncodecConfig.from_pretrained(config_path)
291
+ else:
292
+ config = EncodecConfig()
293
+
294
+ if model_name == "encodec_24khz":
295
+ pass # config is already correct
296
+ elif model_name == "encodec_32khz":
297
+ config.upsampling_ratios = [8, 5, 4, 4]
298
+ config.target_bandwidths = [2.2]
299
+ config.num_filters = 64
300
+ config.sampling_rate = 32_000
301
+ config.codebook_size = 2048
302
+ config.use_causal_conv = False
303
+ config.normalize = False
304
+ config.use_conv_shortcut = False
305
+ elif model_name == "encodec_48khz":
306
+ config.upsampling_ratios = [8, 5, 4, 2]
307
+ config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
308
+ config.sampling_rate = 48_000
309
+ config.audio_channels = 2
310
+ config.use_causal_conv = False
311
+ config.norm_type = "time_group_norm"
312
+ config.normalize = True
313
+ config.chunk_length_s = 1.0
314
+ config.overlap = 0.01
315
+ else:
316
+ raise ValueError(f"Unknown model name: {model_name}")
317
+
318
+ model = EncodecModel(config)
319
+
320
+ feature_extractor = EncodecFeatureExtractor(
321
+ feature_size=config.audio_channels,
322
+ sampling_rate=config.sampling_rate,
323
+ chunk_length_s=config.chunk_length_s,
324
+ overlap=config.overlap,
325
+ )
326
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
327
+
328
+ original_checkpoint = torch.load(checkpoint_path)
329
+ if "best_state" in original_checkpoint:
330
+ # we might have a training state saved, in which case discard the yaml results and just retain the weights
331
+ original_checkpoint = original_checkpoint["best_state"]
332
+ recursively_load_weights(original_checkpoint, model, model_name)
333
+ model.save_pretrained(pytorch_dump_folder_path)
334
+
335
+ if repo_id:
336
+ print("Pushing to the hub...")
337
+ feature_extractor.push_to_hub(repo_id)
338
+ model.push_to_hub(repo_id)
339
+
340
+
341
+ if __name__ == "__main__":
342
+ parser = argparse.ArgumentParser()
343
+ parser.add_argument(
344
+ "--model",
345
+ default="encodec_24khz",
346
+ type=str,
347
+ help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
348
+ )
349
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
350
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
351
+ parser.add_argument(
352
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
353
+ )
354
+ parser.add_argument(
355
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
356
+ )
357
+
358
+ args = parser.parse_args()
359
+ convert_checkpoint(
360
+ args.model,
361
+ args.checkpoint_path,
362
+ args.pytorch_dump_folder_path,
363
+ args.config_path,
364
+ args.push_to_hub,
365
+ )
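A hypothetical Python-level invocation of the conversion entry point above; the checkpoint and output paths are placeholders and must point to a real EnCodec checkpoint (for example the `encodec_24khz-d7cc33bc.th` file listed in the script's download comments):

```python
# Sketch only: placeholder paths, no weights are bundled with this commit.
from transformers.models.encodec.convert_encodec_checkpoint_to_pytorch import convert_checkpoint

convert_checkpoint(
    model_name="encodec_24khz",
    checkpoint_path="encodec_24khz-d7cc33bc.th",    # placeholder: downloaded original checkpoint
    pytorch_dump_folder_path="./encodec_24khz_hf",  # placeholder: output directory
    config_path=None,                               # fall back to the default EncodecConfig
    repo_id=None,                                   # set to a repo name to push to the hub
)
```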
venv/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py ADDED
@@ -0,0 +1,206 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for EnCodec."""
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...utils import PaddingStrategy, TensorType, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class EncodecFeatureExtractor(SequenceFeatureExtractor):
30
+ r"""
31
+ Constructs an EnCodec feature extractor.
32
+
33
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
34
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
35
+
36
+ Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
37
+ [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
38
+
39
+ Args:
40
+ feature_size (`int`, *optional*, defaults to 1):
41
+ The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
42
+ sampling_rate (`int`, *optional*, defaults to 24000):
43
+ The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
44
+ padding_value (`float`, *optional*, defaults to 0.0):
45
+ The value that is used to fill the padding values.
46
+ chunk_length_s (`float`, *optional*):
47
+ If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded.
48
+ overlap (`float`, *optional*):
49
+ Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
50
+ formulae : `int((1.0 - self.overlap) * self.chunk_length)`.
51
+ """
52
+
53
+ model_input_names = ["input_values", "padding_mask"]
54
+
55
+ def __init__(
56
+ self,
57
+ feature_size: int = 1,
58
+ sampling_rate: int = 24000,
59
+ padding_value: float = 0.0,
60
+ chunk_length_s: float = None,
61
+ overlap: float = None,
62
+ **kwargs,
63
+ ):
64
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
65
+ self.chunk_length_s = chunk_length_s
66
+ self.overlap = overlap
67
+
68
+ # This is a property because you might want to change the chunk_length_s on the fly
69
+ @property
70
+ def chunk_length(self) -> Optional[int]:
71
+ if self.chunk_length_s is None:
72
+ return None
73
+ else:
74
+ return int(self.chunk_length_s * self.sampling_rate)
75
+
76
+ # This is a property because you might want to change the chunk_length_s on the fly
77
+ @property
78
+ def chunk_stride(self) -> Optional[int]:
79
+ if self.chunk_length_s is None or self.overlap is None:
80
+ return None
81
+ else:
82
+ return max(1, int((1.0 - self.overlap) * self.chunk_length))
83
+
84
+ def __call__(
85
+ self,
86
+ raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
87
+ padding: Optional[Union[bool, str, PaddingStrategy]] = None,
88
+ truncation: Optional[bool] = False,
89
+ max_length: Optional[int] = None,
90
+ return_tensors: Optional[Union[str, TensorType]] = None,
91
+ sampling_rate: Optional[int] = None,
92
+ ) -> BatchFeature:
93
+ """
94
+ Main method to featurize and prepare for the model one or several sequence(s).
95
+
96
+ Args:
97
+ raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
98
+ The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
99
+ values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
100
+ `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
101
+ (`feature_size = 2`).
102
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
103
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
104
+ index) among:
105
+
106
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
107
+ sequence is provided).
108
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
109
+ acceptable input length for the model if that argument is not provided.
110
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
111
+ lengths).
112
+ truncation (`bool`, *optional*, defaults to `False`):
113
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
114
+ max_length (`int`, *optional*):
115
+ Maximum length of the returned list and optionally padding length (see above).
116
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
117
+ If set, will return tensors instead of list of python integers. Acceptable values are:
118
+
119
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
120
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
121
+ - `'np'`: Return Numpy `np.ndarray` objects.
122
+ sampling_rate (`int`, *optional*):
123
+ The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
124
+ `sampling_rate` at the forward call to prevent silent errors.
125
+ """
126
+ if sampling_rate is not None:
127
+ if sampling_rate != self.sampling_rate:
128
+ raise ValueError(
129
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
130
+ f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
131
+ f" {self.sampling_rate} and not {sampling_rate}."
132
+ )
133
+ else:
134
+ logger.warning(
135
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
136
+ "Failing to do so can result in silent errors that might be hard to debug."
137
+ )
138
+
139
+ if padding and truncation:
140
+ raise ValueError("Both padding and truncation were set. Make sure you only set one.")
141
+ elif padding is None:
142
+ # by default let's pad the inputs
143
+ padding = True
144
+
145
+ is_batched = bool(
146
+ isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
147
+ )
148
+
149
+ if is_batched:
150
+ raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
151
+ elif not is_batched and not isinstance(raw_audio, np.ndarray):
152
+ raw_audio = np.asarray(raw_audio, dtype=np.float32)
153
+ elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
154
+ raw_audio = raw_audio.astype(np.float32)
155
+
156
+ # always return batch
157
+ if not is_batched:
158
+ raw_audio = [np.asarray(raw_audio).T]
159
+
160
+ # verify inputs are valid
161
+ for idx, example in enumerate(raw_audio):
162
+ if example.ndim > 2:
163
+ raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
164
+ if self.feature_size == 1 and example.ndim != 1:
165
+ raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
166
+ if self.feature_size == 2 and example.shape[-1] != 2:
167
+ raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
168
+
169
+ padded_inputs = None
170
+ input_values = BatchFeature({"input_values": raw_audio})
171
+ if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
172
+ if truncation:
173
+ max_length = min(array.shape[0] for array in raw_audio)
174
+ nb_step = int(np.floor(max_length / self.chunk_stride))
175
+ max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
176
+ elif padding:
177
+ max_length = max(array.shape[0] for array in raw_audio)
178
+ nb_step = int(np.ceil(max_length / self.chunk_stride))
179
+ max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
180
+ padding = "max_length"
181
+ else:
182
+ padded_inputs = input_values
183
+
184
+ # normal padding on batch
185
+ if padded_inputs is None:
186
+ padded_inputs = self.pad(
187
+ input_values,
188
+ max_length=max_length,
189
+ truncation=truncation,
190
+ padding=padding,
191
+ return_attention_mask=padding,
192
+ )
193
+ if padding:
194
+ padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
195
+
196
+ input_values = []
197
+ for example in padded_inputs.pop("input_values"):
198
+ if self.feature_size == 1:
199
+ example = example[..., None]
200
+ input_values.append(example.T)
201
+
202
+ padded_inputs["input_values"] = input_values
203
+ if return_tensors is not None:
204
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
205
+
206
+ return padded_inputs
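A minimal sketch of the feature extractor above on synthetic mono audio (the defaults target the 24 kHz checkpoint; real inputs should be sampled at that rate):

```python
# Sketch only: one second of silence run through EncodecFeatureExtractor.__call__.
import numpy as np

from transformers import EncodecFeatureExtractor

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)

raw_audio = np.zeros(24_000, dtype=np.float32)  # shape (num_samples,) for mono input
inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="np")

print(inputs["input_values"].shape)  # (1, 1, 24000): (batch, channels, samples)
print(inputs["padding_mask"].shape)  # (1, 24000)
```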
venv/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py ADDED
@@ -0,0 +1,810 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch EnCodec model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...modeling_utils import PreTrainedModel
26
+ from ...utils import (
27
+ ModelOutput,
28
+ add_start_docstrings,
29
+ add_start_docstrings_to_model_forward,
30
+ logging,
31
+ replace_return_docstrings,
32
+ )
33
+ from .configuration_encodec import EncodecConfig
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ # General docstring
40
+ _CONFIG_FOR_DOC = "EncodecConfig"
41
+
42
+
43
+ from ..deprecated._archive_maps import ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
44
+
45
+
46
+ @dataclass
47
+ class EncodecOutput(ModelOutput):
48
+ """
49
+ Args:
50
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
51
+ Discrete code embeddings computed using `model.encode`.
53
+ audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
53
+ Decoded audio values, obtained using the decoder part of Encodec.
54
+ """
55
+
56
+ audio_codes: torch.LongTensor = None
57
+ audio_values: torch.FloatTensor = None
58
+
59
+
60
+ @dataclass
61
+ class EncodecEncoderOutput(ModelOutput):
62
+ """
63
+ Args:
64
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
65
+ Discrete code embeddings computed using `model.encode`.
66
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
67
+ Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
68
+ """
69
+
70
+ audio_codes: torch.LongTensor = None
71
+ audio_scales: torch.FloatTensor = None
72
+
73
+
74
+ @dataclass
75
+ class EncodecDecoderOutput(ModelOutput):
76
+ """
77
+ Args:
78
+ audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
79
+ Decoded audio values, obtained using the decoder part of Encodec.
80
+ """
81
+
82
+ audio_values: torch.FloatTensor = None
83
+
84
+
85
+ class EncodecConv1d(nn.Module):
86
+ """Conv1d with asymmetric or causal padding and normalization."""
87
+
88
+ def __init__(
89
+ self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1
90
+ ):
91
+ super().__init__()
92
+ self.causal = config.use_causal_conv
93
+ self.pad_mode = config.pad_mode
94
+ self.norm_type = config.norm_type
95
+
96
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
97
+ raise ValueError(
98
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
99
+ )
100
+
101
+ # warn user on unusual setup between dilation and stride
102
+ if stride > 1 and dilation > 1:
103
+ logger.warning(
104
+ "EncodecConv1d has been initialized with stride > 1 and dilation > 1"
105
+ f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})."
106
+ )
107
+
108
+ self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation)
109
+ if self.norm_type == "weight_norm":
110
+ self.conv = nn.utils.weight_norm(self.conv)
111
+ elif self.norm_type == "time_group_norm":
112
+ self.norm = nn.GroupNorm(1, out_channels)
113
+
114
+ kernel_size = self.conv.kernel_size[0]
115
+ stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
116
+ dilation = self.conv.dilation[0]
117
+
118
+ # Effective kernel size with dilations.
119
+ kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)
120
+
121
+ self.register_buffer("stride", stride, persistent=False)
122
+ self.register_buffer("kernel_size", kernel_size, persistent=False)
123
+ self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False)
124
+
125
+ def _get_extra_padding_for_conv1d(
126
+ self,
127
+ hidden_states: torch.Tensor,
128
+ ) -> torch.Tensor:
129
+ """See `pad_for_conv1d`."""
130
+ length = hidden_states.shape[-1]
131
+ n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
132
+ n_frames = torch.ceil(n_frames).to(torch.int64) - 1
133
+ ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
134
+
135
+ return ideal_length - length
136
+
137
+ @staticmethod
138
+ def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0):
139
+ """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
140
+ If this is the case, we insert extra 0 padding to the right before the reflection happens.
141
+ """
142
+ length = hidden_states.shape[-1]
143
+ padding_left, padding_right = paddings
144
+ if not mode == "reflect":
145
+ return nn.functional.pad(hidden_states, paddings, mode, value)
146
+
147
+ max_pad = max(padding_left, padding_right)
148
+ extra_pad = 0
149
+ if length <= max_pad:
150
+ extra_pad = max_pad - length + 1
151
+ hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
152
+ padded = nn.functional.pad(hidden_states, paddings, mode, value)
153
+ end = padded.shape[-1] - extra_pad
154
+ return padded[..., :end]
155
+
156
+ def forward(self, hidden_states):
157
+ extra_padding = self._get_extra_padding_for_conv1d(hidden_states)
158
+
159
+ if self.causal:
160
+ # Left padding for causal
161
+ hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)
162
+ else:
163
+ # Asymmetric padding required for odd strides
164
+ padding_right = self.padding_total // 2
165
+ padding_left = self.padding_total - padding_right
166
+ hidden_states = self._pad1d(
167
+ hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode
168
+ )
169
+
170
+ hidden_states = self.conv(hidden_states)
171
+
172
+ if self.norm_type == "time_group_norm":
173
+ hidden_states = self.norm(hidden_states)
174
+
175
+ return hidden_states
176
+
177
+
178
+ class EncodecConvTranspose1d(nn.Module):
179
+ """ConvTranspose1d with asymmetric or causal padding and normalization."""
180
+
181
+ def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1):
182
+ super().__init__()
183
+ self.causal = config.use_causal_conv
184
+ self.trim_right_ratio = config.trim_right_ratio
185
+ self.norm_type = config.norm_type
186
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
187
+ raise ValueError(
188
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
189
+ )
190
+
191
+ self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride)
192
+ if config.norm_type == "weight_norm":
193
+ self.conv = nn.utils.weight_norm(self.conv)
194
+ elif config.norm_type == "time_group_norm":
195
+ self.norm = nn.GroupNorm(1, out_channels)
196
+
197
+ if not (self.causal or self.trim_right_ratio == 1.0):
198
+ raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions")
199
+
200
+ def forward(self, hidden_states):
201
+ kernel_size = self.conv.kernel_size[0]
202
+ stride = self.conv.stride[0]
203
+ padding_total = kernel_size - stride
204
+
205
+ hidden_states = self.conv(hidden_states)
206
+
207
+ if self.norm_type == "time_group_norm":
208
+ hidden_states = self.norm(hidden_states)
209
+
210
+ # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
211
+ # removed at the very end, when keeping only the right length for the output,
212
+ # as removing it here would require also passing the length at the matching layer
213
+ # in the encoder.
214
+ if self.causal:
215
+ # Trim the padding on the right according to the specified ratio
216
+ # if trim_right_ratio = 1.0, trim everything from right
217
+ padding_right = math.ceil(padding_total * self.trim_right_ratio)
218
+ else:
219
+ # Asymmetric padding required for odd strides
220
+ padding_right = padding_total // 2
221
+
222
+ padding_left = padding_total - padding_right
223
+
224
+ # unpad
225
+ end = hidden_states.shape[-1] - padding_right
226
+ hidden_states = hidden_states[..., padding_left:end]
227
+ return hidden_states
228
+
229
+
230
+ class EncodecLSTM(nn.Module):
231
+ """
232
+ LSTM without worrying about the hidden state, nor the layout of the data. Expects input as convolutional layout.
233
+ """
234
+
235
+ def __init__(self, config, dimension):
236
+ super().__init__()
237
+ self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers)
238
+
239
+ def forward(self, hidden_states):
240
+ hidden_states = hidden_states.permute(2, 0, 1)
241
+ hidden_states = self.lstm(hidden_states)[0] + hidden_states
242
+ hidden_states = hidden_states.permute(1, 2, 0)
243
+ return hidden_states
244
+
245
+
246
+ class EncodecResnetBlock(nn.Module):
247
+ """
248
+ Residual block from SEANet model as used by EnCodec.
249
+ """
250
+
251
+ def __init__(self, config: EncodecConfig, dim: int, dilations: List[int]):
252
+ super().__init__()
253
+ kernel_sizes = (config.residual_kernel_size, 1)
254
+ if len(kernel_sizes) != len(dilations):
255
+ raise ValueError("Number of kernel sizes should match number of dilations")
256
+
257
+ hidden = dim // config.compress
258
+ block = []
259
+ for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
260
+ in_chs = dim if i == 0 else hidden
261
+ out_chs = dim if i == len(kernel_sizes) - 1 else hidden
262
+ block += [nn.ELU()]
263
+ block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
264
+ self.block = nn.ModuleList(block)
265
+
266
+ if config.use_conv_shortcut:
267
+ self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1)
268
+ else:
269
+ self.shortcut = nn.Identity()
270
+
271
+ def forward(self, hidden_states):
272
+ residual = hidden_states
273
+ for layer in self.block:
274
+ hidden_states = layer(hidden_states)
275
+
276
+ return self.shortcut(residual) + hidden_states
277
+
278
+
279
+ class EncodecEncoder(nn.Module):
280
+ """SEANet encoder as used by EnCodec."""
281
+
282
+ def __init__(self, config: EncodecConfig):
283
+ super().__init__()
284
+ model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)]
285
+ scaling = 1
286
+
287
+ # Downsample to raw audio scale
288
+ for ratio in reversed(config.upsampling_ratios):
289
+ current_scale = scaling * config.num_filters
290
+ # Add residual layers
291
+ for j in range(config.num_residual_layers):
292
+ model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])]
293
+ # Add downsampling layers
294
+ model += [nn.ELU()]
295
+ model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)]
296
+ scaling *= 2
297
+
298
+ model += [EncodecLSTM(config, scaling * config.num_filters)]
299
+ model += [nn.ELU()]
300
+ model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)]
301
+
302
+ self.layers = nn.ModuleList(model)
303
+
304
+ def forward(self, hidden_states):
305
+ for layer in self.layers:
306
+ hidden_states = layer(hidden_states)
307
+ return hidden_states
308
+
309
+
310
+ class EncodecDecoder(nn.Module):
311
+ """SEANet decoder as used by EnCodec."""
312
+
313
+ def __init__(self, config: EncodecConfig):
314
+ super().__init__()
315
+ scaling = int(2 ** len(config.upsampling_ratios))
316
+ model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
317
+
318
+ model += [EncodecLSTM(config, scaling * config.num_filters)]
319
+
320
+ # Upsample to raw audio scale
321
+ for ratio in config.upsampling_ratios:
322
+ current_scale = scaling * config.num_filters
323
+ # Add upsampling layers
324
+ model += [nn.ELU()]
325
+ model += [
326
+ EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
327
+ ]
328
+ # Add residual layers
329
+ for j in range(config.num_residual_layers):
330
+ model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
331
+ scaling //= 2
332
+
333
+ # Add final layers
334
+ model += [nn.ELU()]
335
+ model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
336
+ self.layers = nn.ModuleList(model)
337
+
338
+ def forward(self, hidden_states):
339
+ for layer in self.layers:
340
+ hidden_states = layer(hidden_states)
341
+ return hidden_states
342
+
343
+
344
+ class EncodecEuclideanCodebook(nn.Module):
345
+ """Codebook with Euclidean distance."""
346
+
347
+ def __init__(self, config: EncodecConfig):
348
+ super().__init__()
349
+ embed = torch.zeros(config.codebook_size, config.codebook_dim)
350
+
351
+ self.codebook_size = config.codebook_size
352
+
353
+ self.register_buffer("inited", torch.Tensor([True]))
354
+ self.register_buffer("cluster_size", torch.zeros(config.codebook_size))
355
+ self.register_buffer("embed", embed)
356
+ self.register_buffer("embed_avg", embed.clone())
357
+
358
+ def quantize(self, hidden_states):
359
+ embed = self.embed.t()
360
+ scaled_states = hidden_states.pow(2).sum(1, keepdim=True)
361
+ dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True))
362
+ embed_ind = dist.max(dim=-1).indices
363
+ return embed_ind
364
+
365
+ def encode(self, hidden_states):
366
+ shape = hidden_states.shape
367
+ # pre-process
368
+ hidden_states = hidden_states.reshape((-1, shape[-1]))
369
+ # quantize
370
+ embed_ind = self.quantize(hidden_states)
371
+ # post-process
372
+ embed_ind = embed_ind.view(*shape[:-1])
373
+ return embed_ind
374
+
375
+ def decode(self, embed_ind):
376
+ quantize = nn.functional.embedding(embed_ind, self.embed)
377
+ return quantize
378
+
379
+
380
+ class EncodecVectorQuantization(nn.Module):
381
+ """
382
+ Vector quantization implementation. Currently supports only euclidean distance.
383
+ """
384
+
385
+ def __init__(self, config: EncodecConfig):
386
+ super().__init__()
387
+ self.codebook = EncodecEuclideanCodebook(config)
388
+
389
+ def encode(self, hidden_states):
390
+ hidden_states = hidden_states.permute(0, 2, 1)
391
+ embed_in = self.codebook.encode(hidden_states)
392
+ return embed_in
393
+
394
+ def decode(self, embed_ind):
395
+ quantize = self.codebook.decode(embed_ind)
396
+ quantize = quantize.permute(0, 2, 1)
397
+ return quantize
398
+
399
+
400
+ class EncodecResidualVectorQuantizer(nn.Module):
401
+ """Residual Vector Quantizer."""
402
+
403
+ def __init__(self, config: EncodecConfig):
404
+ super().__init__()
405
+ self.codebook_size = config.codebook_size
406
+ self.frame_rate = config.frame_rate
407
+ self.num_quantizers = config.num_quantizers
408
+ self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)])
409
+
410
+ def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float] = None) -> int:
411
+ """Return num_quantizers based on specified target bandwidth."""
412
+ bw_per_q = math.log2(self.codebook_size) * self.frame_rate
413
+ num_quantizers = self.num_quantizers
414
+ if bandwidth is not None and bandwidth > 0.0:
415
+ num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q)))
416
+ return num_quantizers
417
+
418
+ def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float] = None) -> torch.Tensor:
419
+ """
420
+ Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets
421
+ the appropriate number of quantizers to use and returns indices for each quantizer.
422
+ """
423
+ num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth)
424
+ residual = embeddings
425
+ all_indices = []
426
+ for layer in self.layers[:num_quantizers]:
427
+ indices = layer.encode(residual)
428
+ quantized = layer.decode(indices)
429
+ residual = residual - quantized
430
+ all_indices.append(indices)
431
+ out_indices = torch.stack(all_indices)
432
+ return out_indices
433
+
434
+ def decode(self, codes: torch.Tensor) -> torch.Tensor:
435
+ """Decode the given codes to the quantized representation."""
436
+ quantized_out = torch.tensor(0.0, device=codes.device)
437
+ for i, indices in enumerate(codes):
438
+ layer = self.layers[i]
439
+ quantized = layer.decode(indices)
440
+ quantized_out = quantized_out + quantized
441
+ return quantized_out
442
+
443
+
444
+ class EncodecPreTrainedModel(PreTrainedModel):
445
+ """
446
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
447
+ models.
448
+ """
449
+
450
+ config_class = EncodecConfig
451
+ base_model_prefix = "encodec"
452
+ main_input_name = "input_values"
453
+
454
+ def _init_weights(self, module):
455
+ """Initialize the weights"""
456
+ if isinstance(module, nn.Linear):
457
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
458
+ if module.bias is not None:
459
+ module.bias.data.zero_()
460
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
461
+ module.bias.data.zero_()
462
+ module.weight.data.fill_(1.0)
463
+ elif isinstance(module, nn.Conv1d):
464
+ nn.init.kaiming_normal_(module.weight)
465
+ if module.bias is not None:
466
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
467
+ nn.init.uniform_(module.bias, a=-k, b=k)
468
+ elif isinstance(module, nn.Embedding):
469
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
470
+ if module.padding_idx is not None:
471
+ module.weight.data[module.padding_idx].zero_()
472
+ elif isinstance(module, nn.LSTM):
473
+ for name, param in module.named_parameters():
474
+ if "weight" in name:
475
+ nn.init.xavier_uniform_(param)
476
+ elif "bias" in name:
477
+ nn.init.constant_(param, 0.0)
478
+
479
+
480
+ ENCODEC_START_DOCSTRING = r"""
481
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
482
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
483
+ etc.)
484
+
485
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
486
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
487
+ and behavior.
488
+
489
+ Parameters:
490
+ config ([`EncodecConfig`]):
491
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
492
+ load the weights associated with the model, only the configuration. Check out the
493
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
494
+ """
495
+
496
+
497
+ ENCODEC_INPUTS_DOCSTRING = r"""
498
+ Args:
499
+ input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
500
+ Raw audio input converted to float and padded to the appropriate length in order to be encoded using chunks
501
+ of length `config.chunk_length` and a stride of `config.chunk_stride`.
502
+ padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
503
+ Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these?).
504
+ Mask values selected in `[0, 1]`:
505
+
506
+ - 1 for tokens that are **not masked**,
507
+ - 0 for tokens that are **masked**.
508
+
509
+ <Tip warning={true}>
510
+
511
+ `padding_mask` should always be passed, unless the input was truncated or not padded. This is because in
512
+ order to process tensors effectively, the input audio should be padded so that `input_length % stride =
513
+ step` with `step = chunk_length - stride`. This ensures that all chunks are of the same shape.
514
+
515
+ </Tip>
516
+
517
+ bandwidth (`float`, *optional*):
518
+ The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
519
+ bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented as
520
+ `bandwidth == 6.0`
521
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
522
+ Discrete code embeddings computed using `model.encode`.
523
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
524
+ Scaling factor for each `audio_codes` input.
525
+ return_dict (`bool`, *optional*):
526
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
527
+ """
528
+
529
+
530
+ @add_start_docstrings(
531
+ "The EnCodec neural audio codec model.",
532
+ ENCODEC_START_DOCSTRING,
533
+ )
534
+ class EncodecModel(EncodecPreTrainedModel):
535
+ def __init__(self, config: EncodecConfig):
536
+ super().__init__(config)
537
+ self.config = config
538
+
539
+ self.encoder = EncodecEncoder(config)
540
+ self.decoder = EncodecDecoder(config)
541
+
542
+ self.quantizer = EncodecResidualVectorQuantizer(config)
543
+
544
+ self.bits_per_codebook = int(math.log2(self.config.codebook_size))
545
+ if 2**self.bits_per_codebook != self.config.codebook_size:
546
+ raise ValueError("The codebook_size must be a power of 2.")
547
+
548
+ # Initialize weights and apply final processing
549
+ self.post_init()
550
+
551
+ def get_encoder(self):
552
+ return self.encoder
553
+
554
+ def get_decoder(self):
555
+ return self.decoder
556
+
557
+ def _encode_frame(
558
+ self, input_values: torch.Tensor, bandwidth: float, padding_mask: int
559
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
560
+ """
561
+ Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first
562
+ normalized. The padding mask is required to compute the correct scale.
563
+ """
564
+ length = input_values.shape[-1]
565
+ duration = length / self.config.sampling_rate
566
+
567
+ if self.config.chunk_length_s is not None and duration > 1e-5 + self.config.chunk_length_s:
568
+ raise RuntimeError(f"Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}")
569
+
570
+ scale = None
571
+ if self.config.normalize:
572
+ # if the padding is non zero
573
+ input_values = input_values * padding_mask
574
+ mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1]
575
+ scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-8
576
+ input_values = input_values / scale
577
+
578
+ embeddings = self.encoder(input_values)
579
+ codes = self.quantizer.encode(embeddings, bandwidth)
580
+ codes = codes.transpose(0, 1)
581
+ return codes, scale
582
+
583
+ def encode(
584
+ self,
585
+ input_values: torch.Tensor,
586
+ padding_mask: torch.Tensor = None,
587
+ bandwidth: Optional[float] = None,
588
+ return_dict: Optional[bool] = None,
589
+ ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]:
590
+ """
591
+ Encodes the input audio waveform into discrete codes.
592
+
593
+ Args:
594
+ input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
595
+ Float values of the input audio waveform.
596
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
597
+ Padding mask used to pad the `input_values`.
598
+ bandwidth (`float`, *optional*):
599
+ The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
600
+ bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented
601
+ as bandwidth == 6.0
602
+
603
+ Returns:
604
+ A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling
605
+ factors for each chunk when `normalize` is True. Each frame is a tuple `(codebook, scale)`, with
606
+ `codebook` of shape `[batch_size, num_codebooks, frames]`.
607
+ """
608
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
609
+
610
+ if bandwidth is None:
611
+ bandwidth = self.config.target_bandwidths[0]
612
+ if bandwidth not in self.config.target_bandwidths:
613
+ raise ValueError(
614
+ f"This model doesn't support the bandwidth {bandwidth}. "
615
+ f"Select one of {self.config.target_bandwidths}."
616
+ )
617
+
618
+ _, channels, input_length = input_values.shape
619
+
620
+ if channels < 1 or channels > 2:
621
+ raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
622
+
623
+ chunk_length = self.config.chunk_length
624
+ if chunk_length is None:
625
+ chunk_length = input_length
626
+ stride = input_length
627
+ else:
628
+ stride = self.config.chunk_stride
629
+
630
+ if padding_mask is None:
631
+ padding_mask = torch.ones_like(input_values).bool()
632
+
633
+ encoded_frames = []
634
+ scales = []
635
+
636
+ step = chunk_length - stride
637
+ if (input_length % stride) - step != 0:
638
+ raise ValueError(
639
+ "The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly."
640
+ )
641
+
642
+ for offset in range(0, input_length - step, stride):
643
+ mask = padding_mask[..., offset : offset + chunk_length].bool()
644
+ frame = input_values[:, :, offset : offset + chunk_length]
645
+ encoded_frame, scale = self._encode_frame(frame, bandwidth, mask)
646
+ encoded_frames.append(encoded_frame)
647
+ scales.append(scale)
648
+
649
+ encoded_frames = torch.stack(encoded_frames)
650
+
651
+ if not return_dict:
652
+ return (encoded_frames, scales)
653
+
654
+ return EncodecEncoderOutput(encoded_frames, scales)
655
+
656
+ @staticmethod
657
+ def _linear_overlap_add(frames: List[torch.Tensor], stride: int):
658
+ # Generic overlap add, with linear fade-in/fade-out, supporting complex scenario
659
+ # e.g., more than 2 frames per position.
660
+ # The core idea is to use a weight function that is a triangle,
661
+ # with a maximum value at the middle of the chunk.
662
+ # We use this weighting when summing the frames, and divide by the sum of weights
663
+ # for each position at the end. Thus:
664
+ # - if a frame is the only one to cover a position, the weighting is a no-op.
665
+ # - if 2 frames cover a position:
666
+ # ... ...
667
+ # / \/ \
668
+ # / /\ \
669
+ # S T , i.e. S offset of second frame starts, T end of first frame.
670
+ # Then the weight function for each one is: (t - S), (T - t), with `t` a given offset.
671
+ # After the final normalization, the weight of the second frame at position `t` is
672
+ # (t - S) / (t - S + (T - t)) = (t - S) / (T - S), which is exactly what we want.
673
+ #
674
+ # - if more than 2 frames overlap at a given point, we hope that by induction
675
+ # something sensible happens.
676
+ if len(frames) == 0:
677
+ raise ValueError("`frames` cannot be an empty list.")
678
+
679
+ device = frames[0].device
680
+ dtype = frames[0].dtype
681
+ shape = frames[0].shape[:-1]
682
+ total_size = stride * (len(frames) - 1) + frames[-1].shape[-1]
683
+
684
+ frame_length = frames[0].shape[-1]
685
+ time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1]
686
+ weight = 0.5 - (time_vec - 0.5).abs()
687
+
688
+ sum_weight = torch.zeros(total_size, device=device, dtype=dtype)
689
+ out = torch.zeros(*shape, total_size, device=device, dtype=dtype)
690
+ offset: int = 0
691
+
692
+ for frame in frames:
693
+ frame_length = frame.shape[-1]
694
+ out[..., offset : offset + frame_length] += weight[:frame_length] * frame
695
+ sum_weight[offset : offset + frame_length] += weight[:frame_length]
696
+ offset += stride
697
+
698
+ if sum_weight.min() == 0:
699
+ raise ValueError(f"`sum_weight` minimum element must be bigger than zero: {sum_weight}`")
700
+
701
+ return out / sum_weight
702
+
703
+ def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor] = None) -> torch.Tensor:
704
+ codes = codes.transpose(0, 1)
705
+ embeddings = self.quantizer.decode(codes)
706
+ outputs = self.decoder(embeddings)
707
+ if scale is not None:
708
+ outputs = outputs * scale.view(-1, 1, 1)
709
+ return outputs
710
+
711
+ def decode(
712
+ self,
713
+ audio_codes: torch.Tensor,
714
+ audio_scales: torch.Tensor,
715
+ padding_mask: Optional[torch.Tensor] = None,
716
+ return_dict: Optional[bool] = None,
717
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]:
718
+ """
719
+ Decodes the given frames into an output audio waveform.
720
+
721
+ Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
722
+ trimmed.
723
+
724
+ Args:
725
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
726
+ Discrete code embeddings computed using `model.encode`.
727
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
728
+ Scaling factor for each `audio_codes` input.
729
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
730
+ Padding mask used to pad the `input_values`.
731
+ return_dict (`bool`, *optional*):
732
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
733
+
734
+ """
735
+ return_dict = return_dict or self.config.return_dict
736
+
737
+ chunk_length = self.config.chunk_length
738
+ if chunk_length is None:
739
+ if len(audio_codes) != 1:
740
+ raise ValueError(f"Expected one frame, got {len(audio_codes)}")
741
+ audio_values = self._decode_frame(audio_codes[0], audio_scales[0])
742
+ else:
743
+ decoded_frames = []
744
+
745
+ for frame, scale in zip(audio_codes, audio_scales):
746
+ frames = self._decode_frame(frame, scale)
747
+ decoded_frames.append(frames)
748
+
749
+ audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1)
750
+
751
+ # truncate based on padding mask
752
+ if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
753
+ audio_values = audio_values[..., : padding_mask.shape[-1]]
754
+
755
+ if not return_dict:
756
+ return (audio_values,)
757
+ return EncodecDecoderOutput(audio_values)
758
+
759
+ @add_start_docstrings_to_model_forward(ENCODEC_INPUTS_DOCSTRING)
760
+ @replace_return_docstrings(output_type=EncodecOutput, config_class=_CONFIG_FOR_DOC)
761
+ def forward(
762
+ self,
763
+ input_values: torch.Tensor,
764
+ padding_mask: Optional[torch.Tensor] = None,
765
+ bandwidth: Optional[float] = None,
766
+ audio_codes: Optional[torch.Tensor] = None,
767
+ audio_scales: Optional[torch.Tensor] = None,
768
+ return_dict: Optional[bool] = None,
769
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecOutput]:
770
+ r"""
771
+ Returns:
772
+
773
+ Examples:
774
+
775
+ ```python
776
+ >>> from datasets import load_dataset
777
+ >>> from transformers import AutoProcessor, EncodecModel
778
+
779
+ >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
780
+ >>> audio_sample = dataset["train"]["audio"][0]["array"]
781
+
782
+ >>> model_id = "facebook/encodec_24khz"
783
+ >>> model = EncodecModel.from_pretrained(model_id)
784
+ >>> processor = AutoProcessor.from_pretrained(model_id)
785
+
786
+ >>> inputs = processor(raw_audio=audio_sample, return_tensors="pt")
787
+
788
+ >>> outputs = model(**inputs)
789
+ >>> audio_codes = outputs.audio_codes
790
+ >>> audio_values = outputs.audio_values
791
+ ```"""
792
+ return_dict = return_dict or self.config.return_dict
793
+
794
+ if padding_mask is None:
795
+ padding_mask = torch.ones_like(input_values).bool()
796
+
797
+ if audio_codes is not None and audio_scales is None:
798
+ raise ValueError("You specified `audio_codes` but did not specify the `audio_scales`")
799
+
800
+ if audio_scales is not None and audio_codes is None:
801
+ raise ValueError("You specified `audio_scales` but did not specify the `audio_codes`")
802
+
803
+ if audio_scales is None and audio_codes is None:
804
+ audio_codes, audio_scales = self.encode(input_values, padding_mask, bandwidth, False)
805
+
806
+ audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict)[0]
807
+ if not return_dict:
808
+ return (audio_codes, audio_values)
809
+
810
+ return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
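Beyond the `forward` example in the docstring above, the `encode`/`decode` pair defined in this file can be called separately, which is how the discrete codes would typically be stored or transmitted. The sketch below is a hedged usage example built on the same `facebook/encodec_24khz` checkpoint referenced in the docstring; the one-second silent waveform and the 6.0 kbps bandwidth are illustrative choices, not requirements.

```python
# Hedged sketch: encoding to discrete codes and decoding back with the public
# encode()/decode() methods defined above. The input here is just silence.
import torch
from transformers import AutoProcessor, EncodecModel

model_id = "facebook/encodec_24khz"
model = EncodecModel.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)

raw_audio = torch.zeros(24_000).numpy()  # one second of (silent) mono audio at 24 kHz
inputs = processor(raw_audio=raw_audio, sampling_rate=24_000, return_tensors="pt")

with torch.no_grad():
    encoded = model.encode(inputs["input_values"], inputs["padding_mask"], bandwidth=6.0)
    decoded = model.decode(encoded.audio_codes, encoded.audio_scales, inputs["padding_mask"])

print(encoded.audio_codes.shape)   # (nb_chunks, batch_size, num_quantizers, frames)
print(decoded.audio_values.shape)  # (batch_size, channels, num_samples)
```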
venv/lib/python3.10/site-packages/transformers/models/glpn/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
28
+ _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_glpn"] = [
37
+ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "GLPNForDepthEstimation",
39
+ "GLPNLayer",
40
+ "GLPNModel",
41
+ "GLPNPreTrainedModel",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .feature_extraction_glpn import GLPNFeatureExtractor
55
+ from .image_processing_glpn import GLPNImageProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_glpn import (
64
+ GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ GLPNForDepthEstimation,
66
+ GLPNLayer,
67
+ GLPNModel,
68
+ GLPNPreTrainedModel,
69
+ )
70
+
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
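The `_LazyModule` indirection above keeps `import transformers` cheap: the torch- and vision-dependent GLPN submodules are only loaded when an attribute is first accessed. A small hedged sketch of that behaviour:

```python
# Hedged sketch of the lazy-import behaviour set up above: GLPNConfig is resolved
# through _LazyModule only when the attribute is first touched.
import transformers

config_cls = transformers.GLPNConfig   # triggers the real import of configuration_glpn
print(config_cls().model_type)         # "glpn"
```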
venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.22 kB).
 
venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc ADDED
Binary file (5.34 kB).
 
venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc ADDED
Binary file (5.39 kB).
 
venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc ADDED
Binary file (9.32 kB).
 
venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc ADDED
Binary file (23.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GLPN model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GLPNConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate an GLPN
30
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the GLPN
32
+ [vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ num_channels (`int`, *optional*, defaults to 3):
39
+ The number of input channels.
40
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
41
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
42
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
43
+ The number of layers in each encoder block.
44
+ sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
45
+ Sequence reduction ratios in each encoder block.
46
+ hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
47
+ Dimension of each of the encoder blocks.
48
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
49
+ Patch size before each encoder block.
50
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
51
+ Stride before each encoder block.
52
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
53
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
54
+ mlp_ratios (`List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
55
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
56
+ encoder blocks.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
59
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
63
+ The dropout ratio for the attention probabilities.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
67
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
69
+ The epsilon used by the layer normalization layers.
70
+ decoder_hidden_size (`int`, *optional*, defaults to 64):
71
+ The dimension of the decoder.
72
+ max_depth (`int`, *optional*, defaults to 10):
73
+ The maximum depth of the decoder.
74
+ head_in_index (`int`, *optional*, defaults to -1):
75
+ The index of the features to use in the head.
76
+
77
+ Example:
78
+
79
+ ```python
80
+ >>> from transformers import GLPNModel, GLPNConfig
81
+
82
+ >>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
83
+ >>> configuration = GLPNConfig()
84
+
85
+ >>> # Initializing a model from the vinvino02/glpn-kitti style configuration
86
+ >>> model = GLPNModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```"""
91
+
92
+ model_type = "glpn"
93
+
94
+ def __init__(
95
+ self,
96
+ num_channels=3,
97
+ num_encoder_blocks=4,
98
+ depths=[2, 2, 2, 2],
99
+ sr_ratios=[8, 4, 2, 1],
100
+ hidden_sizes=[32, 64, 160, 256],
101
+ patch_sizes=[7, 3, 3, 3],
102
+ strides=[4, 2, 2, 2],
103
+ num_attention_heads=[1, 2, 5, 8],
104
+ mlp_ratios=[4, 4, 4, 4],
105
+ hidden_act="gelu",
106
+ hidden_dropout_prob=0.0,
107
+ attention_probs_dropout_prob=0.0,
108
+ initializer_range=0.02,
109
+ drop_path_rate=0.1,
110
+ layer_norm_eps=1e-6,
111
+ decoder_hidden_size=64,
112
+ max_depth=10,
113
+ head_in_index=-1,
114
+ **kwargs,
115
+ ):
116
+ super().__init__(**kwargs)
117
+
118
+ self.num_channels = num_channels
119
+ self.num_encoder_blocks = num_encoder_blocks
120
+ self.depths = depths
121
+ self.sr_ratios = sr_ratios
122
+ self.hidden_sizes = hidden_sizes
123
+ self.patch_sizes = patch_sizes
124
+ self.strides = strides
125
+ self.mlp_ratios = mlp_ratios
126
+ self.num_attention_heads = num_attention_heads
127
+ self.hidden_act = hidden_act
128
+ self.hidden_dropout_prob = hidden_dropout_prob
129
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
130
+ self.initializer_range = initializer_range
131
+ self.drop_path_rate = drop_path_rate
132
+ self.layer_norm_eps = layer_norm_eps
133
+ self.decoder_hidden_size = decoder_hidden_size
134
+ self.max_depth = max_depth
135
+ self.head_in_index = head_in_index
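As a quick sanity check of the arguments documented above, the sketch below overrides a few defaults (the same B4-sized values the conversion script further down uses) and round-trips the config through `save_pretrained`/`from_pretrained`; `./glpn-custom` is just a throwaway local directory.

```python
# Minimal sketch: overriding GLPNConfig defaults and round-tripping them to disk.
from transformers import GLPNConfig

config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
config.save_pretrained("./glpn-custom")                 # writes config.json
reloaded = GLPNConfig.from_pretrained("./glpn-custom")
assert reloaded.depths == [3, 8, 27, 3]
```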
venv/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py ADDED
@@ -0,0 +1,219 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert GLPN checkpoints."""
16
+
17
+
18
+ import argparse
19
+ from collections import OrderedDict
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from PIL import Image
25
+
26
+ from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
27
+ from transformers.utils import logging
28
+
29
+
30
+ logging.set_verbosity_info()
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ def rename_keys(state_dict):
35
+ new_state_dict = OrderedDict()
36
+ for key, value in state_dict.items():
37
+ if key.startswith("module.encoder"):
38
+ key = key.replace("module.encoder", "glpn.encoder")
39
+ if key.startswith("module.decoder"):
40
+ key = key.replace("module.decoder", "decoder.stages")
41
+ if "patch_embed" in key:
42
+ # replace for example patch_embed1 by patch_embeddings.0
43
+ idx = key[key.find("patch_embed") + len("patch_embed")]
44
+ key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
45
+ if "norm" in key:
46
+ key = key.replace("norm", "layer_norm")
47
+ if "glpn.encoder.layer_norm" in key:
48
+ # replace for example layer_norm1 by layer_norm.0
49
+ idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
50
+ key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
51
+ if "layer_norm1" in key:
52
+ key = key.replace("layer_norm1", "layer_norm_1")
53
+ if "layer_norm2" in key:
54
+ key = key.replace("layer_norm2", "layer_norm_2")
55
+ if "block" in key:
56
+ # replace for example block1 by block.0
57
+ idx = key[key.find("block") + len("block")]
58
+ key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
59
+ if "attn.q" in key:
60
+ key = key.replace("attn.q", "attention.self.query")
61
+ if "attn.proj" in key:
62
+ key = key.replace("attn.proj", "attention.output.dense")
63
+ if "attn" in key:
64
+ key = key.replace("attn", "attention.self")
65
+ if "fc1" in key:
66
+ key = key.replace("fc1", "dense1")
67
+ if "fc2" in key:
68
+ key = key.replace("fc2", "dense2")
69
+ if "linear_pred" in key:
70
+ key = key.replace("linear_pred", "classifier")
71
+ if "linear_fuse" in key:
72
+ key = key.replace("linear_fuse.conv", "linear_fuse")
73
+ key = key.replace("linear_fuse.bn", "batch_norm")
74
+ if "linear_c" in key:
75
+ # replace for example linear_c4 by linear_c.3
76
+ idx = key[key.find("linear_c") + len("linear_c")]
77
+ key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
78
+ if "bot_conv" in key:
79
+ key = key.replace("bot_conv", "0.convolution")
80
+ if "skip_conv1" in key:
81
+ key = key.replace("skip_conv1", "1.convolution")
82
+ if "skip_conv2" in key:
83
+ key = key.replace("skip_conv2", "2.convolution")
84
+ if "fusion1" in key:
85
+ key = key.replace("fusion1", "1.fusion")
86
+ if "fusion2" in key:
87
+ key = key.replace("fusion2", "2.fusion")
88
+ if "fusion3" in key:
89
+ key = key.replace("fusion3", "3.fusion")
90
+ if "fusion" in key and "conv" in key:
91
+ key = key.replace("conv", "convolutional_layer")
92
+ if key.startswith("module.last_layer_depth"):
93
+ key = key.replace("module.last_layer_depth", "head.head")
94
+ new_state_dict[key] = value
95
+
96
+ return new_state_dict
97
+
98
+
99
+ def read_in_k_v(state_dict, config):
100
+ # for each of the encoder blocks:
101
+ for i in range(config.num_encoder_blocks):
102
+ for j in range(config.depths[i]):
103
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
104
+ kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
105
+ kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
106
+ # next, add keys and values (in that order) to the state dict
107
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
108
+ : config.hidden_sizes[i], :
109
+ ]
110
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
111
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
112
+ config.hidden_sizes[i] :, :
113
+ ]
114
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
115
+
116
+
117
+ # We will verify our results on a COCO image
118
+ def prepare_img():
119
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
120
+ image = Image.open(requests.get(url, stream=True).raw)
121
+
122
+ return image
123
+
124
+
125
+ @torch.no_grad()
126
+ def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
127
+ """
128
+ Copy/paste/tweak model's weights to our GLPN structure.
129
+ """
130
+
131
+ # load GLPN configuration (Segformer-B4 size)
132
+ config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
133
+
134
+ # load image processor (only resize + rescale)
135
+ image_processor = GLPNImageProcessor()
136
+
137
+ # prepare image
138
+ image = prepare_img()
139
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
140
+
141
+ logger.info("Converting model...")
142
+
143
+ # load original state dict
144
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
145
+
146
+ # rename keys
147
+ state_dict = rename_keys(state_dict)
148
+
149
+ # key and value matrices need special treatment
150
+ read_in_k_v(state_dict, config)
151
+
152
+ # create HuggingFace model and load state dict
153
+ model = GLPNForDepthEstimation(config)
154
+ model.load_state_dict(state_dict)
155
+ model.eval()
156
+
157
+ # forward pass
158
+ outputs = model(pixel_values)
159
+ predicted_depth = outputs.predicted_depth
160
+
161
+ # verify output
162
+ if model_name is not None:
163
+ if "nyu" in model_name:
164
+ expected_slice = torch.tensor(
165
+ [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
166
+ )
167
+ elif "kitti" in model_name:
168
+ expected_slice = torch.tensor(
169
+ [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
170
+ )
171
+ else:
172
+ raise ValueError(f"Unknown model name: {model_name}")
173
+
174
+ expected_shape = torch.Size([1, 480, 640])
175
+
176
+ assert predicted_depth.shape == expected_shape
177
+ assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
178
+ print("Looks ok!")
179
+
180
+ # finally, push to hub if required
181
+ if push_to_hub:
182
+ logger.info("Pushing model and image processor to the hub...")
183
+ model.push_to_hub(
184
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
185
+ organization="nielsr",
186
+ commit_message="Add model",
187
+ use_temp_dir=True,
188
+ )
189
+ image_processor.push_to_hub(
190
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
191
+ organization="nielsr",
192
+ commit_message="Add image processor",
193
+ use_temp_dir=True,
194
+ )
195
+
196
+
197
+ if __name__ == "__main__":
198
+ parser = argparse.ArgumentParser()
199
+
200
+ parser.add_argument(
201
+ "--checkpoint_path",
202
+ default=None,
203
+ type=str,
204
+ help="Path to the original PyTorch checkpoint (.pth file).",
205
+ )
206
+ parser.add_argument(
207
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
208
+ )
209
+ parser.add_argument(
210
+ "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
211
+ )
212
+ parser.add_argument(
213
+ "--model_name",
214
+ default="glpn-kitti",
215
+ type=str,
216
+ help="Name of the model in case you're pushing to the hub.",
217
+ )
218
+ args = parser.parse_args()
219
+ convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
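The script is meant to be run from the command line with `--checkpoint_path`, `--pytorch_dump_folder_path`, and optionally `--model_name`/`--push_to_hub`, as the argparse block above shows. The key renaming itself is mechanical string rewriting; the sketch below runs `rename_keys` on a single made-up key to show the mapping (the tensor value is a placeholder, and importing the conversion module assumes its own dependencies such as `requests` and `PIL` are installed).

```python
# Hedged illustration of rename_keys() on one fake entry of an original checkpoint.
from collections import OrderedDict

import torch
from transformers.models.glpn.convert_glpn_to_pytorch import rename_keys

fake_state_dict = OrderedDict({"module.encoder.patch_embed1.norm.weight": torch.ones(3)})
renamed = rename_keys(fake_state_dict)
print(list(renamed))  # ['glpn.encoder.patch_embeddings.0.layer_norm.weight']
```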
venv/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for GLPN."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_glpn import GLPNImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class GLPNFeatureExtractor(GLPNImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use GLPNImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
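The class above is only a deprecation shim: constructing it still works, but it emits a `FutureWarning` and behaves as a `GLPNImageProcessor`, as the short sketch below checks.

```python
# Small sketch of the deprecation shim: the old name still works, warns, and is
# interchangeable with GLPNImageProcessor.
import warnings

from transformers import GLPNFeatureExtractor, GLPNImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = GLPNFeatureExtractor()

assert isinstance(extractor, GLPNImageProcessor)
assert any(issubclass(w.category, FutureWarning) for w in caught)
```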
venv/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py ADDED
@@ -0,0 +1,233 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for GLPN."""
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
23
+ from ...image_transforms import resize, to_channel_dimension_format
24
+ from ...image_utils import (
25
+ ChannelDimension,
26
+ PILImageResampling,
27
+ get_image_size,
28
+ infer_channel_dimension_format,
29
+ is_scaled_image,
30
+ make_list_of_images,
31
+ to_numpy_array,
32
+ valid_images,
33
+ validate_kwargs,
34
+ validate_preprocess_arguments,
35
+ )
36
+ from ...utils import TensorType, logging
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class GLPNImageProcessor(BaseImageProcessor):
43
+ r"""
44
+ Constructs a GLPN image processor.
45
+
46
+ Args:
47
+ do_resize (`bool`, *optional*, defaults to `True`):
48
+ Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
49
+ `size_divisor`. Can be overridden by `do_resize` in `preprocess`.
50
+ size_divisor (`int`, *optional*, defaults to 32):
51
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
52
+ multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
53
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
54
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
55
+ do_rescale (`bool`, *optional*, defaults to `True`):
56
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
57
+ overridden by `do_rescale` in `preprocess`.
58
+ """
59
+
60
+ model_input_names = ["pixel_values"]
61
+
62
+ def __init__(
63
+ self,
64
+ do_resize: bool = True,
65
+ size_divisor: int = 32,
66
+ resample=PILImageResampling.BILINEAR,
67
+ do_rescale: bool = True,
68
+ **kwargs,
69
+ ) -> None:
70
+ self.do_resize = do_resize
71
+ self.do_rescale = do_rescale
72
+ self.size_divisor = size_divisor
73
+ self.resample = resample
74
+ super().__init__(**kwargs)
75
+ self._valid_processor_keys = [
76
+ "images",
77
+ "do_resize",
78
+ "size_divisor",
79
+ "resample",
80
+ "do_rescale",
81
+ "return_tensors",
82
+ "data_format",
83
+ "input_data_format",
84
+ ]
85
+
86
+ def resize(
87
+ self,
88
+ image: np.ndarray,
89
+ size_divisor: int,
90
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
91
+ data_format: Optional[ChannelDimension] = None,
92
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
93
+ **kwargs,
94
+ ) -> np.ndarray:
95
+ """
96
+ Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
97
+
98
+ If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
99
+
100
+ Args:
101
+ image (`np.ndarray`):
102
+ The image to resize.
103
+ size_divisor (`int`):
104
+ The image is resized so its height and width are rounded down to the closest multiple of
105
+ `size_divisor`.
106
+ resample:
107
+ `PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
108
+ data_format (`ChannelDimension` or `str`, *optional*):
109
+ The channel dimension format for the output image. If `None`, the channel dimension format of the input
110
+ image is used. Can be one of:
111
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
112
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
113
+ input_data_format (`ChannelDimension` or `str`, *optional*):
114
+ The channel dimension format of the input image. If not set, the channel dimension format is inferred
115
+ from the input image. Can be one of:
116
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
117
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
118
+
119
+ Returns:
120
+ `np.ndarray`: The resized image.
121
+ """
122
+ height, width = get_image_size(image, channel_dim=input_data_format)
123
+ # Rounds the height and width down to the closest multiple of size_divisor
124
+ new_h = height // size_divisor * size_divisor
125
+ new_w = width // size_divisor * size_divisor
126
+ image = resize(
127
+ image,
128
+ (new_h, new_w),
129
+ resample=resample,
130
+ data_format=data_format,
131
+ input_data_format=input_data_format,
132
+ **kwargs,
133
+ )
134
+ return image
135
+
136
+ def preprocess(
137
+ self,
138
+ images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
139
+ do_resize: Optional[bool] = None,
140
+ size_divisor: Optional[int] = None,
141
+ resample=None,
142
+ do_rescale: Optional[bool] = None,
143
+ return_tensors: Optional[Union[TensorType, str]] = None,
144
+ data_format: ChannelDimension = ChannelDimension.FIRST,
145
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
146
+ **kwargs,
147
+ ) -> BatchFeature:
148
+ """
149
+ Preprocess the given images.
150
+
151
+ Args:
152
+ images (`PIL.Image.Image` or `TensorType` or `List[np.ndarray]` or `List[TensorType]`):
153
+ Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
154
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
155
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
156
+ Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
157
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
158
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the
159
+ closest multiple of `size_divisor`.
160
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
161
+ `PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
162
+ an effect if `do_resize` is set to `True`.
163
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
164
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
165
+ return_tensors (`str` or `TensorType`, *optional*):
166
+ The type of tensors to return. Can be one of:
167
+ - `None`: Return a list of `np.ndarray`.
168
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
169
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
170
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
171
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
172
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
173
+ The channel dimension format for the output image. Can be one of:
174
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
175
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
176
+ input_data_format (`ChannelDimension` or `str`, *optional*):
177
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
178
+ from the input image. Can be one of:
179
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
180
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
181
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
182
+ """
183
+ do_resize = do_resize if do_resize is not None else self.do_resize
184
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
185
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
186
+ resample = resample if resample is not None else self.resample
187
+
188
+ images = make_list_of_images(images)
189
+
190
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
191
+
192
+ if not valid_images(images):
193
+ raise ValueError(
194
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
195
+ "torch.Tensor, tf.Tensor or jax.ndarray."
196
+ )
197
+
198
+ # Here, the rescale() method uses a constant rescale_factor. It does not need to be validated
199
+ # with a rescale_factor.
200
+ validate_preprocess_arguments(
201
+ do_resize=do_resize,
202
+ size=size_divisor, # size_divisor is validated here in place of a `size` dict.
203
+ resample=resample,
204
+ )
205
+
206
+ # All transformations expect numpy arrays.
207
+ images = [to_numpy_array(img) for img in images]
208
+
209
+ if is_scaled_image(images[0]) and do_rescale:
210
+ logger.warning_once(
211
+ "It looks like you are trying to rescale already rescaled images. If the input"
212
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
213
+ )
214
+
215
+ if input_data_format is None:
216
+ # We assume that all images have the same channel dimension format.
217
+ input_data_format = infer_channel_dimension_format(images[0])
218
+
219
+ if do_resize:
220
+ images = [
221
+ self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
222
+ for image in images
223
+ ]
224
+
225
+ if do_rescale:
226
+ images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]
227
+
228
+ images = [
229
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
230
+ ]
231
+
232
+ data = {"pixel_values": images}
233
+ return BatchFeature(data=data, tensor_type=return_tensors)
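
A minimal usage sketch of the image processor above, assuming `transformers` and `numpy` are installed; it shows the size-divisor rounding (260x170 -> 256x160) and the rescaling of pixel values to floats in [0, 1]:

```python
import numpy as np

from transformers import GLPNImageProcessor

processor = GLPNImageProcessor(do_resize=True, size_divisor=32, do_rescale=True)

# Dummy 260x170 RGB image with uint8 pixel values in [0, 255].
image = np.random.randint(0, 256, size=(260, 170, 3), dtype=np.uint8)

inputs = processor(images=image, return_tensors="np")

# Height and width are rounded down to the closest multiple of 32 (260 -> 256, 170 -> 160),
# channels are moved first, and pixel values are rescaled to floats in [0, 1].
print(inputs["pixel_values"].shape)  # (1, 3, 256, 160)
```
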
venv/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py ADDED
@@ -0,0 +1,778 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch GLPN model."""
16
+
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import (
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_glpn import GLPNConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ # General docstring
43
+ _CONFIG_FOR_DOC = "GLPNConfig"
44
+
45
+ # Base docstring
46
+ _CHECKPOINT_FOR_DOC = "vinvino02/glpn-kitti"
47
+ _EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20]
48
+
49
+
50
+ from ..deprecated._archive_maps import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ # Copied from transformers.models.beit.modeling_beit.drop_path
54
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
55
+ """
56
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
57
+
58
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
59
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
60
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
61
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
62
+ argument.
63
+ """
64
+ if drop_prob == 0.0 or not training:
65
+ return input
66
+ keep_prob = 1 - drop_prob
67
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
68
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
69
+ random_tensor.floor_() # binarize
70
+ output = input.div(keep_prob) * random_tensor
71
+ return output
72
+
73
+
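
A quick numerical check of the stochastic-depth helper above; a sketch assuming `torch` is installed and the function is imported from this module:

```python
import torch

from transformers.models.glpn.modeling_glpn import drop_path

x = torch.ones(4, 3)  # 4 samples with arbitrary trailing dimensions

# In training mode each sample is either zeroed out (probability drop_prob) or scaled
# by 1 / keep_prob, so the expected value of the output matches the input.
out = drop_path(x, drop_prob=0.5, training=True)
print(out)

# In eval mode the input is returned unchanged.
assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)
```
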
74
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerDropPath
75
+ class GLPNDropPath(nn.Module):
76
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
77
+
78
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
79
+ super().__init__()
80
+ self.drop_prob = drop_prob
81
+
82
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
83
+ return drop_path(hidden_states, self.drop_prob, self.training)
84
+
85
+ def extra_repr(self) -> str:
86
+ return "p={}".format(self.drop_prob)
87
+
88
+
89
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerOverlapPatchEmbeddings
90
+ class GLPNOverlapPatchEmbeddings(nn.Module):
91
+ """Construct the overlapping patch embeddings."""
92
+
93
+ def __init__(self, patch_size, stride, num_channels, hidden_size):
94
+ super().__init__()
95
+ self.proj = nn.Conv2d(
96
+ num_channels,
97
+ hidden_size,
98
+ kernel_size=patch_size,
99
+ stride=stride,
100
+ padding=patch_size // 2,
101
+ )
102
+
103
+ self.layer_norm = nn.LayerNorm(hidden_size)
104
+
105
+ def forward(self, pixel_values):
106
+ embeddings = self.proj(pixel_values)
107
+ _, _, height, width = embeddings.shape
108
+ # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels)
109
+ # this can be fed to a Transformer layer
110
+ embeddings = embeddings.flatten(2).transpose(1, 2)
111
+ embeddings = self.layer_norm(embeddings)
112
+ return embeddings, height, width
113
+
114
+
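
A shape check for the overlapping patch embedding above (a sketch assuming `torch` is installed): a 7x7 convolution with stride 4 and padding 3 turns a 64x64 image into a 16x16 grid of patch tokens, which is then flattened into a sequence:

```python
import torch

from transformers.models.glpn.modeling_glpn import GLPNOverlapPatchEmbeddings

embed = GLPNOverlapPatchEmbeddings(patch_size=7, stride=4, num_channels=3, hidden_size=32)

pixel_values = torch.randn(1, 3, 64, 64)
embeddings, height, width = embed(pixel_values)

# The feature map is flattened into a token sequence for the Transformer blocks.
print(embeddings.shape, height, width)  # torch.Size([1, 256, 32]) 16 16
```
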
115
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention
116
+ class GLPNEfficientSelfAttention(nn.Module):
117
+ """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
118
+ paper](https://arxiv.org/abs/2102.12122)."""
119
+
120
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
121
+ super().__init__()
122
+ self.hidden_size = hidden_size
123
+ self.num_attention_heads = num_attention_heads
124
+
125
+ if self.hidden_size % self.num_attention_heads != 0:
126
+ raise ValueError(
127
+ f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
128
+ f"heads ({self.num_attention_heads})"
129
+ )
130
+
131
+ self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
132
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
133
+
134
+ self.query = nn.Linear(self.hidden_size, self.all_head_size)
135
+ self.key = nn.Linear(self.hidden_size, self.all_head_size)
136
+ self.value = nn.Linear(self.hidden_size, self.all_head_size)
137
+
138
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
139
+
140
+ self.sr_ratio = sequence_reduction_ratio
141
+ if sequence_reduction_ratio > 1:
142
+ self.sr = nn.Conv2d(
143
+ hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio
144
+ )
145
+ self.layer_norm = nn.LayerNorm(hidden_size)
146
+
147
+ def transpose_for_scores(self, hidden_states):
148
+ new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
149
+ hidden_states = hidden_states.view(new_shape)
150
+ return hidden_states.permute(0, 2, 1, 3)
151
+
152
+ def forward(
153
+ self,
154
+ hidden_states,
155
+ height,
156
+ width,
157
+ output_attentions=False,
158
+ ):
159
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
160
+
161
+ if self.sr_ratio > 1:
162
+ batch_size, seq_len, num_channels = hidden_states.shape
163
+ # Reshape to (batch_size, num_channels, height, width)
164
+ hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
165
+ # Apply sequence reduction
166
+ hidden_states = self.sr(hidden_states)
167
+ # Reshape back to (batch_size, seq_len, num_channels)
168
+ hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
169
+ hidden_states = self.layer_norm(hidden_states)
170
+
171
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
172
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
173
+
174
+ # Take the dot product between "query" and "key" to get the raw attention scores.
175
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
176
+
177
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
178
+
179
+ # Normalize the attention scores to probabilities.
180
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
181
+
182
+ # This is actually dropping out entire tokens to attend to, which might
183
+ # seem a bit unusual, but is taken from the original Transformer paper.
184
+ attention_probs = self.dropout(attention_probs)
185
+
186
+ context_layer = torch.matmul(attention_probs, value_layer)
187
+
188
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
189
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
190
+ context_layer = context_layer.view(new_context_layer_shape)
191
+
192
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
193
+
194
+ return outputs
195
+
196
+
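
The sequence reduction above shrinks only the key/value sequence; the query (and therefore the output) keeps the full length. A shape sketch assuming `torch` is installed, using a hypothetical minimal stand-in object for `config` that only carries the attribute this module reads:

```python
import torch
from types import SimpleNamespace

from transformers.models.glpn.modeling_glpn import GLPNEfficientSelfAttention

# Hypothetical minimal config: only the attribute read by this module is provided.
config = SimpleNamespace(attention_probs_dropout_prob=0.0)

attn = GLPNEfficientSelfAttention(
    config, hidden_size=32, num_attention_heads=1, sequence_reduction_ratio=8
)

height, width = 64, 64
hidden_states = torch.randn(1, height * width, 32)  # (batch, seq_len, channels)

(context,) = attn(hidden_states, height, width)

# Output length equals the query length (64 * 64 = 4096), while keys/values were
# internally reduced to (64 / 8) * (64 / 8) = 64 tokens before attention.
print(context.shape)  # torch.Size([1, 4096, 32])
```
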
197
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerSelfOutput
198
+ class GLPNSelfOutput(nn.Module):
199
+ def __init__(self, config, hidden_size):
200
+ super().__init__()
201
+ self.dense = nn.Linear(hidden_size, hidden_size)
202
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
203
+
204
+ def forward(self, hidden_states, input_tensor):
205
+ hidden_states = self.dense(hidden_states)
206
+ hidden_states = self.dropout(hidden_states)
207
+ return hidden_states
208
+
209
+
210
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerAttention with Segformer->GLPN
211
+ class GLPNAttention(nn.Module):
212
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
213
+ super().__init__()
214
+ self.self = GLPNEfficientSelfAttention(
215
+ config=config,
216
+ hidden_size=hidden_size,
217
+ num_attention_heads=num_attention_heads,
218
+ sequence_reduction_ratio=sequence_reduction_ratio,
219
+ )
220
+ self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
221
+ self.pruned_heads = set()
222
+
223
+ def prune_heads(self, heads):
224
+ if len(heads) == 0:
225
+ return
226
+ heads, index = find_pruneable_heads_and_indices(
227
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
228
+ )
229
+
230
+ # Prune linear layers
231
+ self.self.query = prune_linear_layer(self.self.query, index)
232
+ self.self.key = prune_linear_layer(self.self.key, index)
233
+ self.self.value = prune_linear_layer(self.self.value, index)
234
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
235
+
236
+ # Update hyper params and store pruned heads
237
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
238
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
239
+ self.pruned_heads = self.pruned_heads.union(heads)
240
+
241
+ def forward(self, hidden_states, height, width, output_attentions=False):
242
+ self_outputs = self.self(hidden_states, height, width, output_attentions)
243
+
244
+ attention_output = self.output(self_outputs[0], hidden_states)
245
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
246
+ return outputs
247
+
248
+
249
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerDWConv
250
+ class GLPNDWConv(nn.Module):
251
+ def __init__(self, dim=768):
252
+ super().__init__()
253
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
254
+
255
+ def forward(self, hidden_states, height, width):
256
+ batch_size, seq_len, num_channels = hidden_states.shape
257
+ hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
258
+ hidden_states = self.dwconv(hidden_states)
259
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
260
+
261
+ return hidden_states
262
+
263
+
264
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerMixFFN with Segformer->GLPN
265
+ class GLPNMixFFN(nn.Module):
266
+ def __init__(self, config, in_features, hidden_features=None, out_features=None):
267
+ super().__init__()
268
+ out_features = out_features or in_features
269
+ self.dense1 = nn.Linear(in_features, hidden_features)
270
+ self.dwconv = GLPNDWConv(hidden_features)
271
+ if isinstance(config.hidden_act, str):
272
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
273
+ else:
274
+ self.intermediate_act_fn = config.hidden_act
275
+ self.dense2 = nn.Linear(hidden_features, out_features)
276
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
277
+
278
+ def forward(self, hidden_states, height, width):
279
+ hidden_states = self.dense1(hidden_states)
280
+ hidden_states = self.dwconv(hidden_states, height, width)
281
+ hidden_states = self.intermediate_act_fn(hidden_states)
282
+ hidden_states = self.dropout(hidden_states)
283
+ hidden_states = self.dense2(hidden_states)
284
+ hidden_states = self.dropout(hidden_states)
285
+ return hidden_states
286
+
287
+
288
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerLayer with Segformer->GLPN
289
+ class GLPNLayer(nn.Module):
290
+ """This corresponds to the Block class in the original implementation."""
291
+
292
+ def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
293
+ super().__init__()
294
+ self.layer_norm_1 = nn.LayerNorm(hidden_size)
295
+ self.attention = GLPNAttention(
296
+ config,
297
+ hidden_size=hidden_size,
298
+ num_attention_heads=num_attention_heads,
299
+ sequence_reduction_ratio=sequence_reduction_ratio,
300
+ )
301
+ self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
302
+ self.layer_norm_2 = nn.LayerNorm(hidden_size)
303
+ mlp_hidden_size = int(hidden_size * mlp_ratio)
304
+ self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
305
+
306
+ def forward(self, hidden_states, height, width, output_attentions=False):
307
+ self_attention_outputs = self.attention(
308
+ self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention
309
+ height,
310
+ width,
311
+ output_attentions=output_attentions,
312
+ )
313
+
314
+ attention_output = self_attention_outputs[0]
315
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
316
+
317
+ # first residual connection (with stochastic depth)
318
+ attention_output = self.drop_path(attention_output)
319
+ hidden_states = attention_output + hidden_states
320
+
321
+ mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
322
+
323
+ # second residual connection (with stochastic depth)
324
+ mlp_output = self.drop_path(mlp_output)
325
+ layer_output = mlp_output + hidden_states
326
+
327
+ outputs = (layer_output,) + outputs
328
+
329
+ return outputs
330
+
331
+
332
+ class GLPNEncoder(nn.Module):
333
+ def __init__(self, config):
334
+ super().__init__()
335
+ self.config = config
336
+
337
+ # stochastic depth decay rule
338
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
339
+
340
+ # patch embeddings
341
+ embeddings = []
342
+ for i in range(config.num_encoder_blocks):
343
+ embeddings.append(
344
+ GLPNOverlapPatchEmbeddings(
345
+ patch_size=config.patch_sizes[i],
346
+ stride=config.strides[i],
347
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
348
+ hidden_size=config.hidden_sizes[i],
349
+ )
350
+ )
351
+ self.patch_embeddings = nn.ModuleList(embeddings)
352
+
353
+ # Transformer blocks
354
+ blocks = []
355
+ cur = 0
356
+ for i in range(config.num_encoder_blocks):
357
+ # each block consists of layers
358
+ layers = []
359
+ if i != 0:
360
+ cur += config.depths[i - 1]
361
+ for j in range(config.depths[i]):
362
+ layers.append(
363
+ GLPNLayer(
364
+ config,
365
+ hidden_size=config.hidden_sizes[i],
366
+ num_attention_heads=config.num_attention_heads[i],
367
+ drop_path=dpr[cur + j],
368
+ sequence_reduction_ratio=config.sr_ratios[i],
369
+ mlp_ratio=config.mlp_ratios[i],
370
+ )
371
+ )
372
+ blocks.append(nn.ModuleList(layers))
373
+
374
+ self.block = nn.ModuleList(blocks)
375
+
376
+ # Layer norms
377
+ self.layer_norm = nn.ModuleList(
378
+ [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
379
+ )
380
+
381
+ def forward(
382
+ self,
383
+ pixel_values,
384
+ output_attentions=False,
385
+ output_hidden_states=False,
386
+ return_dict=True,
387
+ ):
388
+ all_hidden_states = () if output_hidden_states else None
389
+ all_self_attentions = () if output_attentions else None
390
+
391
+ batch_size = pixel_values.shape[0]
392
+
393
+ hidden_states = pixel_values
394
+ for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
395
+ embedding_layer, block_layer, norm_layer = x
396
+ # first, obtain patch embeddings
397
+ hidden_states, height, width = embedding_layer(hidden_states)
398
+ # second, send embeddings through blocks
399
+ for i, blk in enumerate(block_layer):
400
+ layer_outputs = blk(hidden_states, height, width, output_attentions)
401
+ hidden_states = layer_outputs[0]
402
+ if output_attentions:
403
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
404
+ # third, apply layer norm
405
+ hidden_states = norm_layer(hidden_states)
406
+ # fourth, optionally reshape back to (batch_size, num_channels, height, width)
407
+ hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
408
+ if output_hidden_states:
409
+ all_hidden_states = all_hidden_states + (hidden_states,)
410
+
411
+ if not return_dict:
412
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
413
+ return BaseModelOutput(
414
+ last_hidden_state=hidden_states,
415
+ hidden_states=all_hidden_states,
416
+ attentions=all_self_attentions,
417
+ )
418
+
419
+
420
+ class GLPNPreTrainedModel(PreTrainedModel):
421
+ """
422
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
423
+ models.
424
+ """
425
+
426
+ config_class = GLPNConfig
427
+ base_model_prefix = "glpn"
428
+ main_input_name = "pixel_values"
429
+
430
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights
431
+ def _init_weights(self, module):
432
+ """Initialize the weights"""
433
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
434
+ # Slightly different from the TF version which uses truncated_normal for initialization
435
+ # cf https://github.com/pytorch/pytorch/pull/5617
436
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
437
+ if module.bias is not None:
438
+ module.bias.data.zero_()
439
+ elif isinstance(module, nn.Embedding):
440
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
441
+ if module.padding_idx is not None:
442
+ module.weight.data[module.padding_idx].zero_()
443
+ elif isinstance(module, nn.LayerNorm):
444
+ module.bias.data.zero_()
445
+ module.weight.data.fill_(1.0)
446
+
447
+
448
+ GLPN_START_DOCSTRING = r"""
449
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
450
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
451
+ behavior.
452
+
453
+ Parameters:
454
+ config ([`GLPNConfig`]): Model configuration class with all the parameters of the model.
455
+ Initializing with a config file does not load the weights associated with the model, only the
456
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
457
+ """
458
+
459
+ GLPN_INPUTS_DOCSTRING = r"""
460
+
461
+ Args:
462
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
463
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
464
+ [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details.
465
+
466
+ output_attentions (`bool`, *optional*):
467
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
468
+ tensors for more detail.
469
+ output_hidden_states (`bool`, *optional*):
470
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
471
+ more detail.
472
+ return_dict (`bool`, *optional*):
473
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
474
+ """
475
+
476
+
477
+ @add_start_docstrings(
478
+ "The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.",
479
+ GLPN_START_DOCSTRING,
480
+ )
481
+ class GLPNModel(GLPNPreTrainedModel):
482
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN
483
+ def __init__(self, config):
484
+ super().__init__(config)
485
+ self.config = config
486
+
487
+ # hierarchical Transformer encoder
488
+ self.encoder = GLPNEncoder(config)
489
+
490
+ # Initialize weights and apply final processing
491
+ self.post_init()
492
+
493
+ def _prune_heads(self, heads_to_prune):
494
+ """
495
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
496
+ class PreTrainedModel
497
+ """
498
+ for layer, heads in heads_to_prune.items():
499
+ self.encoder.layer[layer].attention.prune_heads(heads)
500
+
501
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
502
+ @add_code_sample_docstrings(
503
+ checkpoint=_CHECKPOINT_FOR_DOC,
504
+ output_type=BaseModelOutput,
505
+ config_class=_CONFIG_FOR_DOC,
506
+ modality="vision",
507
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
508
+ )
509
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward
510
+ def forward(
511
+ self,
512
+ pixel_values: torch.FloatTensor,
513
+ output_attentions: Optional[bool] = None,
514
+ output_hidden_states: Optional[bool] = None,
515
+ return_dict: Optional[bool] = None,
516
+ ) -> Union[Tuple, BaseModelOutput]:
517
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
518
+ output_hidden_states = (
519
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
520
+ )
521
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
522
+
523
+ encoder_outputs = self.encoder(
524
+ pixel_values,
525
+ output_attentions=output_attentions,
526
+ output_hidden_states=output_hidden_states,
527
+ return_dict=return_dict,
528
+ )
529
+ sequence_output = encoder_outputs[0]
530
+
531
+ if not return_dict:
532
+ return (sequence_output,) + encoder_outputs[1:]
533
+
534
+ return BaseModelOutput(
535
+ last_hidden_state=sequence_output,
536
+ hidden_states=encoder_outputs.hidden_states,
537
+ attentions=encoder_outputs.attentions,
538
+ )
539
+
540
+
541
+ class GLPNSelectiveFeatureFusion(nn.Module):
542
+ """
543
+ Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This
544
+ module adaptively selects and integrates local and global features by computing an attention map for each feature.
545
+ """
546
+
547
+ def __init__(self, in_channel=64):
548
+ super().__init__()
549
+
550
+ self.convolutional_layer1 = nn.Sequential(
551
+ nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
552
+ nn.BatchNorm2d(in_channel),
553
+ nn.ReLU(),
554
+ )
555
+
556
+ self.convolutional_layer2 = nn.Sequential(
557
+ nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
558
+ nn.BatchNorm2d(int(in_channel / 2)),
559
+ nn.ReLU(),
560
+ )
561
+
562
+ self.convolutional_layer3 = nn.Conv2d(
563
+ in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
564
+ )
565
+
566
+ self.sigmoid = nn.Sigmoid()
567
+
568
+ def forward(self, local_features, global_features):
569
+ # concatenate features along the channel dimension
570
+ features = torch.cat((local_features, global_features), dim=1)
571
+ # pass through convolutional layers
572
+ features = self.convolutional_layer1(features)
573
+ features = self.convolutional_layer2(features)
574
+ features = self.convolutional_layer3(features)
575
+ # apply sigmoid to get two-channel attention map
576
+ attn = self.sigmoid(features)
577
+ # construct hybrid features by adding element-wise
578
+ hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
579
+ :, 1, :, :
580
+ ].unsqueeze(1)
581
+
582
+ return hybrid_features
583
+
584
+
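
A quick shape check of the fusion block above (a sketch assuming `torch` is installed): the two-channel sigmoid map weights the local and global features element-wise before they are summed:

```python
import torch

from transformers.models.glpn.modeling_glpn import GLPNSelectiveFeatureFusion

fusion = GLPNSelectiveFeatureFusion(in_channel=64)
fusion.eval()  # use running batch-norm statistics for a deterministic single-image check

local_features = torch.randn(1, 64, 32, 32)
global_features = torch.randn(1, 64, 32, 32)

hybrid = fusion(local_features, global_features)
print(hybrid.shape)  # torch.Size([1, 64, 32, 32])
```
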
585
+ class GLPNDecoderStage(nn.Module):
586
+ def __init__(self, in_channels, out_channels):
587
+ super().__init__()
588
+ should_skip = in_channels == out_channels
589
+ self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
590
+ self.fusion = GLPNSelectiveFeatureFusion(out_channels)
591
+ self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
592
+
593
+ def forward(self, hidden_state, residual=None):
594
+ hidden_state = self.convolution(hidden_state)
595
+ if residual is not None:
596
+ hidden_state = self.fusion(hidden_state, residual)
597
+ hidden_state = self.upsample(hidden_state)
598
+
599
+ return hidden_state
603
+
604
+
605
+ class GLPNDecoder(nn.Module):
606
+ def __init__(self, config):
607
+ super().__init__()
608
+ # we use features from end -> start
609
+ reversed_hidden_sizes = config.hidden_sizes[::-1]
610
+ out_channels = config.decoder_hidden_size
611
+
612
+ self.stages = nn.ModuleList(
613
+ [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reversed_hidden_sizes]
614
+ )
615
+ # don't fuse in first stage
616
+ self.stages[0].fusion = None
617
+
618
+ self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
619
+
620
+ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]:
621
+ stage_hidden_states = []
622
+ stage_hidden_state = None
623
+ for hidden_state, stage in zip(hidden_states[::-1], self.stages):
624
+ stage_hidden_state = stage(hidden_state, stage_hidden_state)
625
+ stage_hidden_states.append(stage_hidden_state)
626
+
627
+ stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)
628
+
629
+ return stage_hidden_states
630
+
631
+
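
A shape walk-through of the decoder above; a sketch assuming `torch` is installed and that the default `GLPNConfig` values (`hidden_sizes=[32, 64, 160, 256]`, `decoder_hidden_size=64`) still hold:

```python
import torch

from transformers import GLPNConfig
from transformers.models.glpn.modeling_glpn import GLPNDecoder

decoder = GLPNDecoder(GLPNConfig())
decoder.eval()

# Fake encoder features for a 64x64 input, at strides 4, 8, 16 and 32.
features = [
    torch.randn(1, 32, 16, 16),
    torch.randn(1, 64, 8, 8),
    torch.randn(1, 160, 4, 4),
    torch.randn(1, 256, 2, 2),
]

outputs = decoder(features)

# Features are consumed coarsest-first, fused with the previous stage and upsampled 2x
# per stage; the last stage is additionally upsampled to the full input resolution.
print([tuple(o.shape) for o in outputs])
# [(1, 64, 4, 4), (1, 64, 8, 8), (1, 64, 16, 16), (1, 64, 64, 64)]
```
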
632
+ class SiLogLoss(nn.Module):
633
+ r"""
634
+ Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://arxiv.org/abs/1406.2283).
635
+
636
+ $$L=\sqrt{\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{\lambda}{n^{2}}\left(\sum_{i} d_{i}\right)^{2}}$$ where $d_{i}=\log y_{i}-\log
637
+ y_{i}^{*}$ and $\lambda$ corresponds to the `lambd` argument (0.5 by default).
638
+
639
+ """
640
+
641
+ def __init__(self, lambd=0.5):
642
+ super().__init__()
643
+ self.lambd = lambd
644
+
645
+ def forward(self, pred, target):
646
+ valid_mask = (target > 0).detach()
647
+ diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
648
+ loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
649
+
650
+ return loss
651
+
652
+
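
A tiny numerical check that the implementation above matches the formula in its docstring (a sketch assuming `torch` is installed):

```python
import torch

from transformers.models.glpn.modeling_glpn import SiLogLoss

pred = torch.tensor([1.0, 2.0, 4.0])
target = torch.tensor([1.0, 2.0, 2.0])

loss_fct = SiLogLoss(lambd=0.5)
loss = loss_fct(pred, target)

# Recompute by hand: d_i = log(y_i) - log(y_i*), loss = sqrt(mean(d^2) - lambd * mean(d)^2).
d = torch.log(target) - torch.log(pred)
expected = torch.sqrt((d**2).mean() - 0.5 * d.mean() ** 2)
assert torch.allclose(loss, expected)
```
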
653
+ class GLPNDepthEstimationHead(nn.Module):
654
+ def __init__(self, config):
655
+ super().__init__()
656
+
657
+ self.config = config
658
+
659
+ channels = config.decoder_hidden_size
660
+ self.head = nn.Sequential(
661
+ nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
662
+ nn.ReLU(inplace=False),
663
+ nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1),
664
+ )
665
+
666
+ def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
667
+ # use last features of the decoder
668
+ hidden_states = hidden_states[self.config.head_in_index]
669
+
670
+ hidden_states = self.head(hidden_states)
671
+
672
+ predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
673
+ predicted_depth = predicted_depth.squeeze(dim=1)
674
+
675
+ return predicted_depth
676
+
677
+
678
+ @add_start_docstrings(
679
+ """GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.""",
680
+ GLPN_START_DOCSTRING,
681
+ )
682
+ class GLPNForDepthEstimation(GLPNPreTrainedModel):
683
+ def __init__(self, config):
684
+ super().__init__(config)
685
+
686
+ self.glpn = GLPNModel(config)
687
+ self.decoder = GLPNDecoder(config)
688
+ self.head = GLPNDepthEstimationHead(config)
689
+
690
+ # Initialize weights and apply final processing
691
+ self.post_init()
692
+
693
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
694
+ @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
695
+ def forward(
696
+ self,
697
+ pixel_values: torch.FloatTensor,
698
+ labels: Optional[torch.FloatTensor] = None,
699
+ output_attentions: Optional[bool] = None,
700
+ output_hidden_states: Optional[bool] = None,
701
+ return_dict: Optional[bool] = None,
702
+ ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
703
+ r"""
704
+ labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
705
+ Ground truth depth estimation maps for computing the loss.
706
+
707
+ Returns:
708
+
709
+ Examples:
710
+
711
+ ```python
712
+ >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
713
+ >>> import torch
714
+ >>> import numpy as np
715
+ >>> from PIL import Image
716
+ >>> import requests
717
+
718
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
719
+ >>> image = Image.open(requests.get(url, stream=True).raw)
720
+
721
+ >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
722
+ >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
723
+
724
+ >>> # prepare image for the model
725
+ >>> inputs = image_processor(images=image, return_tensors="pt")
726
+
727
+ >>> with torch.no_grad():
728
+ ... outputs = model(**inputs)
729
+ ... predicted_depth = outputs.predicted_depth
730
+
731
+ >>> # interpolate to original size
732
+ >>> prediction = torch.nn.functional.interpolate(
733
+ ... predicted_depth.unsqueeze(1),
734
+ ... size=image.size[::-1],
735
+ ... mode="bicubic",
736
+ ... align_corners=False,
737
+ ... )
738
+
739
+ >>> # visualize the prediction
740
+ >>> output = prediction.squeeze().cpu().numpy()
741
+ >>> formatted = (output * 255 / np.max(output)).astype("uint8")
742
+ >>> depth = Image.fromarray(formatted)
743
+ ```"""
744
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
745
+ output_hidden_states = (
746
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
747
+ )
748
+
749
+ outputs = self.glpn(
750
+ pixel_values,
751
+ output_attentions=output_attentions,
752
+ output_hidden_states=True, # we need the intermediate hidden states
753
+ return_dict=return_dict,
754
+ )
755
+
756
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
757
+
758
+ out = self.decoder(hidden_states)
759
+ predicted_depth = self.head(out)
760
+
761
+ loss = None
762
+ if labels is not None:
763
+ loss_fct = SiLogLoss()
764
+ loss = loss_fct(predicted_depth, labels)
765
+
766
+ if not return_dict:
767
+ if output_hidden_states:
768
+ output = (predicted_depth,) + outputs[1:]
769
+ else:
770
+ output = (predicted_depth,) + outputs[2:]
771
+ return ((loss,) + output) if loss is not None else output
772
+
773
+ return DepthEstimatorOutput(
774
+ loss=loss,
775
+ predicted_depth=predicted_depth,
776
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
777
+ attentions=outputs.attentions,
778
+ )
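
For completeness, an end-to-end sketch with a randomly initialised model, so no checkpoint download is required; the default `GLPNConfig` values (e.g. `max_depth=10`) are assumptions about the configuration shipped with this file:

```python
import torch

from transformers import GLPNConfig, GLPNForDepthEstimation

model = GLPNForDepthEstimation(GLPNConfig())
model.eval()

pixel_values = torch.randn(2, 3, 64, 64)  # already-preprocessed images
labels = torch.rand(2, 64, 64) * 10       # ground-truth depth; zero pixels are masked out by SiLogLoss

with torch.no_grad():
    outputs = model(pixel_values=pixel_values, labels=labels)

print(outputs.loss)                   # scalar scale-invariant log loss
print(outputs.predicted_depth.shape)  # torch.Size([2, 64, 64]) -- depth at the input resolution
```
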