applied-ai-018 committed (verified) · commit 241ac2a · 1 parent: 19c111d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.

Files changed (50)
  1. ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step20/zero/19.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/19.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step20/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step20/zero/23.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  7. lm-evaluation-harness/tests/testdata/anli_r1-v0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_adjunct_island-v0-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/blimp_transitive-v0-loglikelihood +1 -0
  12. lm-evaluation-harness/tests/testdata/boolq-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-loglikelihood +1 -0
  14. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_mathematics-v0-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_microeconomics-v0-loglikelihood +1 -0
  16. lm-evaluation-harness/tests/testdata/hendrycksTest-management-v0-loglikelihood +1 -0
  17. lm-evaluation-harness/tests/testdata/hendrycksTest-professional_medicine-v0-loglikelihood +1 -0
  18. lm-evaluation-harness/tests/testdata/lambada_mt_es-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-res.json +1 -0
  20. lm-evaluation-harness/tests/testdata/pile_books3-v1-loglikelihood_rolling +1 -0
  21. lm-evaluation-harness/tests/testdata/pile_wikipedia-v1-loglikelihood_rolling +1 -0
  22. lm-evaluation-harness/tests/testdata/prost-v0-loglikelihood +1 -0
  23. lm-evaluation-harness/tests/testdata/qa4mre_2012-v0-res.json +1 -0
  24. lm-evaluation-harness/tests/testdata/wmt20-en-ru-v0-greedy_until +1 -0
  25. lm-evaluation-harness/tests/testdata/wmt20-en-ru-v0-res.json +1 -0
  26. lm-evaluation-harness/tests/testdata/wnli-v1-loglikelihood +1 -0
  27. venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py +65 -0
  28. venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py +157 -0
  32. venv/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py +937 -0
  33. venv/lib/python3.10/site-packages/transformers/models/git/__init__.py +60 -0
  34. venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/git/configuration_git.py +240 -0
  40. venv/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py +428 -0
  41. venv/lib/python3.10/site-packages/transformers/models/git/modeling_git.py +1543 -0
  42. venv/lib/python3.10/site-packages/transformers/models/git/processing_git.py +113 -0
  43. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__init__.py +69 -0
  44. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/configuration_megatron_bert.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/convert_megatron_bert_checkpoint.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/modeling_megatron_bert.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/configuration_megatron_bert.py +129 -0
  49. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py +334 -0
  50. venv/lib/python3.10/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py +1836 -0
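
For context, a minimal sketch of how a commit like this is typically produced. It assumes the `huggingface_hub` client's `HfApi.upload_large_folder` helper (the resumable uploader the commit message refers to); the repository id and local folder are placeholders, not values taken from this diff.

```python
# Minimal sketch (assumption): recreating this kind of commit with huggingface_hub's
# resumable large-folder uploader. Repo id and folder path are placeholders.
from huggingface_hub import HfApi

api = HfApi()  # uses a cached login or HF_TOKEN with write access
api.upload_large_folder(
    repo_id="applied-ai-018/<repo-name>",  # hypothetical target repository
    repo_type="model",
    folder_path="./local-checkout",  # directory holding ckpts/, lm-evaluation-harness/, venv/, ...
)
```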
ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d37c3bafa9234b87e8db199ccc35f7dabbefc1a4ec14e8268466bc85e565abd
+ size 50332749
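
The three `+` lines above are a Git LFS pointer, not the tensor itself: the roughly 48 MiB `fp32.pt` blob is stored by its SHA-256 content address, and only this small stub lives in the git history (as seen when LFS smudging is disabled). A short illustrative sketch of reading such a pointer; the helper below is not part of git-lfs or huggingface_hub.

```python
# Illustrative helper: parse the "key value" lines of a Git LFS pointer file.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines() if line.strip())
    fields["size"] = int(fields["size"])  # size of the real blob in bytes
    return fields

ptr = parse_lfs_pointer("ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/fp32.pt")
print(ptr["oid"], ptr["size"])  # e.g. "sha256:9d37...abd" 50332749
```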
ckpts/universal/global_step20/zero/19.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb38e30f536c2d8786cfcd759f6a628baaf3aae4e857c30040875f7841e92d76
+ size 33555627
ckpts/universal/global_step20/zero/19.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdc6116b6677ff273d0ba7fb2399b6616b67f469eae653c56259e6a1cd873bbb
+ size 33555533
ckpts/universal/global_step20/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:992b69359257fb650f2c5553a69c033840c66803f7022f7bbc8975356f9e7fe8
+ size 33555612
ckpts/universal/global_step20/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39ffaebce8c821052c7bd63e6c8e77e7f2a4ab605296c04474aff8b9ddbf0e46
+ size 33555627
ckpts/universal/global_step20/zero/23.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ee62cfc8824cc480e007fc2f4cba5c79634d0cde52a1a5ea655b66b6b314efa
+ size 33555533
lm-evaluation-harness/tests/testdata/anli_r1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"anli_r1": {"acc": 0.334, "acc_stderr": 0.014922019523732967}}, "versions": {"anli_r1": 0}}
lm-evaluation-harness/tests/testdata/blimp_adjunct_island-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_adjunct_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_adjunct_island": 0}}
lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 0bdad31c974ba064e1f1ba931841ec2ba7461e8b0ca54ea5f79f08b6bae0bab5
lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_animate_subject_trans": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_animate_subject_trans": 0}}
lm-evaluation-harness/tests/testdata/blimp_transitive-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d0d47fe40a7ee558ba782edbc4f49f7d9123c8472a36decc97f8ab142b45b9d8
lm-evaluation-harness/tests/testdata/boolq-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"boolq": {"acc": 0.5048929663608562, "acc_stderr": 0.00874463623355505}}, "versions": {"boolq": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ e35d1eeb356ac1084d4e9773f028cb3c81ba1c6e5574d598ac4a78aa467cd797
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_mathematics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_mathematics": {"acc": 0.22592592592592592, "acc_norm": 0.24814814814814815, "acc_norm_stderr": 0.0263357394040558, "acc_stderr": 0.025497532639609553}}, "versions": {"hendrycksTest-high_school_mathematics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_microeconomics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 513b998585ebc1ebdefca6435b7c84fd73dc36fc80321a22503467f04efed23e
lm-evaluation-harness/tests/testdata/hendrycksTest-management-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 355489f4bd176ab84db5ef4c03d56ddeeeb1b0ad69827122b2d800e1cdc7e5f0
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_medicine-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7a30599858398169cde61430c18efdd7fb4dcd09c34aa9baba70f0f8cf17a9f1
lm-evaluation-harness/tests/testdata/lambada_mt_es-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_mt_es": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_es": 0}}
lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai_mt_fr": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_fr": 0}}
lm-evaluation-harness/tests/testdata/pile_books3-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 0f8f36f705b999b6d55fa72ff89a82793dd1cb568ab1f8727a6a2086a12b9410
lm-evaluation-harness/tests/testdata/pile_wikipedia-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ ef9ec0dd408316ca6537228a6812e839f14b30608973081d41efc47c138338da
lm-evaluation-harness/tests/testdata/prost-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7c475f5b36a8b79f94c2be035441e7fd59dac021b0713b1fc72d256424c70b0b
lm-evaluation-harness/tests/testdata/qa4mre_2012-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"qa4mre_2012": {"acc": 0.15625, "acc_norm": 0.16875, "acc_norm_stderr": 0.029702236908328808, "acc_stderr": 0.02879508360159146}}, "versions": {"qa4mre_2012": 0}}
lm-evaluation-harness/tests/testdata/wmt20-en-ru-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ a1613831f69c1679a54670092af40ce76617b79d7cc837984803b0fc52bb8bde
lm-evaluation-harness/tests/testdata/wmt20-en-ru-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt20-en-ru": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0007327811114614671, "chrf_stderr": 4.43155903515048e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ru": 0}}
lm-evaluation-harness/tests/testdata/wnli-v1-loglikelihood ADDED
@@ -0,0 +1 @@
+ 8a0f81661d2ab2334bbc8031fac31c0c8882f1d9271dd51599d21dfdbb726dea
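
These `lm-evaluation-harness` fixtures come in two flavors: the `*-res.json` files hold the expected metrics for a task version, while the extension-less `*-loglikelihood` / `*-greedy_until` / `*-loglikelihood_rolling` files hold a single SHA-256 digest used to check that a mocked run's requests and outputs are unchanged. A hedged sketch of how such fixtures are typically consumed; exactly what the harness serializes before hashing is not shown in this diff and is an assumption here.

```python
# Sketch under assumptions: load an expected-results fixture and recompute a digest.
import hashlib
import json

with open("lm-evaluation-harness/tests/testdata/boolq-v0-res.json") as f:
    expected = json.load(f)
print(expected["results"]["boolq"]["acc"])  # 0.5048929663608562

def sha256_digest(serialized: str) -> str:
    # the harness's exact serialization of the mocked requests is assumed, not shown above
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()
```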
venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py ADDED
@@ -0,0 +1,65 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_decision_transformer": [
+         "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "DecisionTransformerConfig",
+     ],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_decision_transformer"] = [
+         "DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DecisionTransformerGPT2Model",
+         "DecisionTransformerGPT2PreTrainedModel",
+         "DecisionTransformerModel",
+         "DecisionTransformerPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_decision_transformer import (
+         DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         DecisionTransformerConfig,
+     )
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_decision_transformer import (
+             DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DecisionTransformerGPT2Model,
+             DecisionTransformerGPT2PreTrainedModel,
+             DecisionTransformerModel,
+             DecisionTransformerPreTrainedModel,
+         )
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
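
Because `sys.modules[__name__]` is swapped for a `_LazyModule`, importing the package stays cheap: the configuration and modeling submodules are only imported on first attribute access, and the torch-only symbols are simply unavailable when torch is not installed. A small sketch of what that wiring means in practice:

```python
# Sketch: attribute access on the lazy package triggers the real submodule imports.
from transformers.models import decision_transformer

cfg_cls = decision_transformer.DecisionTransformerConfig   # imports configuration_decision_transformer
model_cls = decision_transformer.DecisionTransformerModel  # imports modeling_decision_transformer (requires torch)
print(cfg_cls.__module__)
print(model_cls.__module__)
```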
venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.09 kB).
 
venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc ADDED
Binary file (6.19 kB).
 
venv/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc ADDED
Binary file (25.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py ADDED
@@ -0,0 +1,157 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Decision Transformer model configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class DecisionTransformerConfig(PretrainedConfig):
+     """
+     This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to
+     instantiate a Decision Transformer model according to the specified arguments, defining the model architecture.
+     Instantiating a configuration with the defaults will yield a similar configuration to that of the standard
+     DecisionTransformer architecture. Many of the config options are used to instantiate the GPT2 model that is used as
+     part of the architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         state_dim (`int`, *optional*, defaults to 17):
+             The state size for the RL environment
+         act_dim (`int`, *optional*, defaults to 4):
+             The size of the output action space
+         hidden_size (`int`, *optional*, defaults to 128):
+             The size of the hidden layers
+         max_ep_len (`int`, *optional*, defaults to 4096):
+             The maximum length of an episode in the environment
+         action_tanh (`bool`, *optional*, defaults to True):
+             Whether to use a tanh activation on action prediction
+         vocab_size (`int`, *optional*, defaults to 1):
+             Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`DecisionTransformerModel`].
+         n_positions (`int`, *optional*, defaults to 1024):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         n_layer (`int`, *optional*, defaults to 3):
+             Number of hidden layers in the Transformer encoder.
+         n_head (`int`, *optional*, defaults to 1):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         n_inner (`int`, *optional*):
+             Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`.
+         activation_function (`str`, *optional*, defaults to `"relu"`):
+             Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+         resid_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         embd_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the embeddings.
+         attn_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention.
+         layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+             The epsilon to use in the layer normalization layers.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         scale_attn_weights (`bool`, *optional*, defaults to `True`):
+             Scale attention weights by dividing by sqrt(hidden_size).
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+         scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
+             Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
+         reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
+             Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
+             dot-product/softmax to float() when training with mixed precision.
+
+     Example:
+
+     ```python
+     >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel
+
+     >>> # Initializing a DecisionTransformer configuration
+     >>> configuration = DecisionTransformerConfig()
+
+     >>> # Initializing a model (with random weights) from the configuration
+     >>> model = DecisionTransformerModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "decision_transformer"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "max_position_embeddings": "n_positions",
+         "num_attention_heads": "n_head",
+         "num_hidden_layers": "n_layer",
+     }
+
+     def __init__(
+         self,
+         state_dim=17,
+         act_dim=4,
+         hidden_size=128,
+         max_ep_len=4096,
+         action_tanh=True,
+         vocab_size=1,
+         n_positions=1024,
+         n_layer=3,
+         n_head=1,
+         n_inner=None,
+         activation_function="relu",
+         resid_pdrop=0.1,
+         embd_pdrop=0.1,
+         attn_pdrop=0.1,
+         layer_norm_epsilon=1e-5,
+         initializer_range=0.02,
+         scale_attn_weights=True,
+         use_cache=True,
+         bos_token_id=50256,
+         eos_token_id=50256,
+         scale_attn_by_inverse_layer_idx=False,
+         reorder_and_upcast_attn=False,
+         **kwargs,
+     ):
+         self.state_dim = state_dim
+         self.act_dim = act_dim
+         self.hidden_size = hidden_size
+         self.max_ep_len = max_ep_len
+         self.action_tanh = action_tanh
+         self.vocab_size = vocab_size
+         self.n_positions = n_positions
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_inner = n_inner
+         self.activation_function = activation_function
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.attn_pdrop = attn_pdrop
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
+         self.reorder_and_upcast_attn = reorder_and_upcast_attn
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
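
A short usage sketch of the class above: `attribute_map` aliases the GPT-2 style names, so generic code that reads `num_hidden_layers` or `max_position_embeddings` keeps working, and the environment dimensions are plain constructor arguments. The state/action sizes below are illustrative values, not defaults of any released checkpoint.

```python
# Sketch: illustrative dimensions; attribute_map exposes GPT-2 style aliases.
from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128, n_layer=3, n_head=1)
assert config.num_hidden_layers == config.n_layer == 3
assert config.max_position_embeddings == config.n_positions == 1024
```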
venv/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py ADDED
@@ -0,0 +1,937 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DecisionTransformer model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.cuda.amp import autocast
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
31
+ from ...utils import (
32
+ ModelOutput,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_decision_transformer import DecisionTransformerConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
44
+ _CONFIG_FOR_DOC = "DecisionTransformerConfig"
45
+
46
+
47
+ from ..deprecated._archive_maps import DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ # Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
51
+ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
52
+ """Load tf checkpoints in a pytorch model"""
53
+ try:
54
+ import re
55
+
56
+ import tensorflow as tf
57
+ except ImportError:
58
+ logger.error(
59
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
60
+ "https://www.tensorflow.org/install/ for installation instructions."
61
+ )
62
+ raise
63
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
64
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
65
+ # Load weights from TF model
66
+ init_vars = tf.train.list_variables(tf_path)
67
+ names = []
68
+ arrays = []
69
+ for name, shape in init_vars:
70
+ logger.info(f"Loading TF weight {name} with shape {shape}")
71
+ array = tf.train.load_variable(tf_path, name)
72
+ names.append(name)
73
+ arrays.append(array.squeeze())
74
+
75
+ for name, array in zip(names, arrays):
76
+ name = name[6:] # skip "model/"
77
+ name = name.split("/")
78
+ pointer = model
79
+ for m_name in name:
80
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
81
+ scope_names = re.split(r"(\d+)", m_name)
82
+ else:
83
+ scope_names = [m_name]
84
+ if scope_names[0] == "w" or scope_names[0] == "g":
85
+ pointer = getattr(pointer, "weight")
86
+ elif scope_names[0] == "b":
87
+ pointer = getattr(pointer, "bias")
88
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
89
+ pointer = getattr(pointer, scope_names[0])
90
+ pointer = getattr(pointer, "weight")
91
+ else:
92
+ pointer = getattr(pointer, scope_names[0])
93
+ if len(scope_names) >= 2:
94
+ num = int(scope_names[1])
95
+ pointer = pointer[num]
96
+ try:
97
+ if pointer.shape != array.shape:
98
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
99
+ except ValueError as e:
100
+ e.args += (pointer.shape, array.shape)
101
+ raise
102
+ logger.info(f"Initialize PyTorch weight {name}")
103
+ pointer.data = torch.from_numpy(array)
104
+ return model
105
+
106
+
107
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
108
+ class DecisionTransformerGPT2Attention(nn.Module):
109
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
110
+ super().__init__()
111
+ self.config = config
112
+ max_positions = config.max_position_embeddings
113
+ self.register_buffer(
114
+ "bias",
115
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
116
+ 1, 1, max_positions, max_positions
117
+ ),
118
+ persistent=False,
119
+ )
120
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
121
+
122
+ self.embed_dim = config.hidden_size
123
+ self.num_heads = config.num_attention_heads
124
+ self.head_dim = self.embed_dim // self.num_heads
125
+ self.split_size = self.embed_dim
126
+ if self.head_dim * self.num_heads != self.embed_dim:
127
+ raise ValueError(
128
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
129
+ f" {self.num_heads})."
130
+ )
131
+
132
+ self.scale_attn_weights = config.scale_attn_weights
133
+ self.is_cross_attention = is_cross_attention
134
+
135
+ # Layer-wise attention scaling, reordering, and upcasting
136
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
137
+ self.layer_idx = layer_idx
138
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
139
+
140
+ if self.is_cross_attention:
141
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
142
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
143
+ else:
144
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
145
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
146
+
147
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
148
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
149
+ self.is_causal = True
150
+
151
+ self.pruned_heads = set()
152
+
153
+ def prune_heads(self, heads):
154
+ if len(heads) == 0:
155
+ return
156
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
157
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
158
+
159
+ # Prune conv1d layers
160
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
161
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
162
+
163
+ # Update hyper params
164
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
165
+ self.num_heads = self.num_heads - len(heads)
166
+ self.pruned_heads = self.pruned_heads.union(heads)
167
+
168
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
169
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
170
+
171
+ if self.scale_attn_weights:
172
+ attn_weights = attn_weights / torch.full(
173
+ [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
174
+ )
175
+
176
+ # Layer-wise attention scaling
177
+ if self.scale_attn_by_inverse_layer_idx:
178
+ attn_weights = attn_weights / float(self.layer_idx + 1)
179
+
180
+ if not self.is_cross_attention:
181
+ # if only "normal" attention layer implements causal mask
182
+ query_length, key_length = query.size(-2), key.size(-2)
183
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
184
+ mask_value = torch.finfo(attn_weights.dtype).min
185
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
186
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
187
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
188
+ attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
189
+
190
+ if attention_mask is not None:
191
+ # Apply the attention mask
192
+ attn_weights = attn_weights + attention_mask
193
+
194
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
195
+
196
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
197
+ attn_weights = attn_weights.type(value.dtype)
198
+ attn_weights = self.attn_dropout(attn_weights)
199
+
200
+ # Mask heads if we want to
201
+ if head_mask is not None:
202
+ attn_weights = attn_weights * head_mask
203
+
204
+ attn_output = torch.matmul(attn_weights, value)
205
+
206
+ return attn_output, attn_weights
207
+
208
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
209
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
210
+ bsz, num_heads, q_seq_len, dk = query.size()
211
+ _, _, k_seq_len, _ = key.size()
212
+
213
+ # Preallocate attn_weights for `baddbmm`
214
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
215
+
216
+ # Compute Scale Factor
217
+ scale_factor = 1.0
218
+ if self.scale_attn_weights:
219
+ scale_factor /= float(value.size(-1)) ** 0.5
220
+
221
+ if self.scale_attn_by_inverse_layer_idx:
222
+ scale_factor /= float(self.layer_idx + 1)
223
+
224
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
225
+ with autocast(enabled=False):
226
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
227
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
228
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
229
+
230
+ if not self.is_cross_attention:
231
+ # if only "normal" attention layer implements causal mask
232
+ query_length, key_length = query.size(-2), key.size(-2)
233
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
234
+ mask_value = torch.finfo(attn_weights.dtype).min
235
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
236
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
237
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
238
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
239
+
240
+ if attention_mask is not None:
241
+ # Apply the attention mask
242
+ attn_weights = attn_weights + attention_mask
243
+
244
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
245
+
246
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
247
+ if attn_weights.dtype != torch.float32:
248
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
249
+ attn_weights = attn_weights.type(value.dtype)
250
+ attn_weights = self.attn_dropout(attn_weights)
251
+
252
+ # Mask heads if we want to
253
+ if head_mask is not None:
254
+ attn_weights = attn_weights * head_mask
255
+
256
+ attn_output = torch.matmul(attn_weights, value)
257
+
258
+ return attn_output, attn_weights
259
+
260
+ def _split_heads(self, tensor, num_heads, attn_head_size):
261
+ """
262
+ Splits hidden_size dim into attn_head_size and num_heads
263
+ """
264
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
265
+ tensor = tensor.view(new_shape)
266
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
267
+
268
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
269
+ """
270
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
271
+ """
272
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
273
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
274
+ return tensor.view(new_shape)
275
+
276
+ def forward(
277
+ self,
278
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
279
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
280
+ attention_mask: Optional[torch.FloatTensor] = None,
281
+ head_mask: Optional[torch.FloatTensor] = None,
282
+ encoder_hidden_states: Optional[torch.Tensor] = None,
283
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
284
+ use_cache: Optional[bool] = False,
285
+ output_attentions: Optional[bool] = False,
286
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
287
+ if encoder_hidden_states is not None:
288
+ if not hasattr(self, "q_attn"):
289
+ raise ValueError(
290
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
291
+ "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
292
+ )
293
+
294
+ query = self.q_attn(hidden_states)
295
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
296
+ attention_mask = encoder_attention_mask
297
+ else:
298
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
299
+
300
+ query = self._split_heads(query, self.num_heads, self.head_dim)
301
+ key = self._split_heads(key, self.num_heads, self.head_dim)
302
+ value = self._split_heads(value, self.num_heads, self.head_dim)
303
+
304
+ if layer_past is not None:
305
+ past_key, past_value = layer_past
306
+ key = torch.cat((past_key, key), dim=-2)
307
+ value = torch.cat((past_value, value), dim=-2)
308
+
309
+ if use_cache is True:
310
+ present = (key, value)
311
+ else:
312
+ present = None
313
+
314
+ if self.reorder_and_upcast_attn:
315
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
316
+ else:
317
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
318
+
319
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
320
+ attn_output = self.c_proj(attn_output)
321
+ attn_output = self.resid_dropout(attn_output)
322
+
323
+ outputs = (attn_output, present)
324
+ if output_attentions:
325
+ outputs += (attn_weights,)
326
+
327
+ return outputs # a, present, (attentions)
328
+
329
+
330
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
331
+ class DecisionTransformerGPT2MLP(nn.Module):
332
+ def __init__(self, intermediate_size, config):
333
+ super().__init__()
334
+ embed_dim = config.hidden_size
335
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
336
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
337
+ self.act = ACT2FN[config.activation_function]
338
+ self.dropout = nn.Dropout(config.resid_pdrop)
339
+
340
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
341
+ hidden_states = self.c_fc(hidden_states)
342
+ hidden_states = self.act(hidden_states)
343
+ hidden_states = self.c_proj(hidden_states)
344
+ hidden_states = self.dropout(hidden_states)
345
+ return hidden_states
346
+
347
+
348
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
349
+ class DecisionTransformerGPT2Block(nn.Module):
350
+ # Ignore copy
351
+ def __init__(self, config, layer_idx=None):
352
+ super().__init__()
353
+ hidden_size = config.hidden_size
354
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
355
+
356
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
357
+ self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
358
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
359
+
360
+ if config.add_cross_attention:
361
+ self.crossattention = DecisionTransformerGPT2Attention(
362
+ config, is_cross_attention=True, layer_idx=layer_idx
363
+ )
364
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
365
+
366
+ self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)
367
+
368
+ def forward(
369
+ self,
370
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
371
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
372
+ attention_mask: Optional[torch.FloatTensor] = None,
373
+ head_mask: Optional[torch.FloatTensor] = None,
374
+ encoder_hidden_states: Optional[torch.Tensor] = None,
375
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
376
+ use_cache: Optional[bool] = False,
377
+ output_attentions: Optional[bool] = False,
378
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
379
+ residual = hidden_states
380
+ hidden_states = self.ln_1(hidden_states)
381
+ attn_outputs = self.attn(
382
+ hidden_states,
383
+ layer_past=layer_past,
384
+ attention_mask=attention_mask,
385
+ head_mask=head_mask,
386
+ use_cache=use_cache,
387
+ output_attentions=output_attentions,
388
+ )
389
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
390
+ outputs = attn_outputs[1:]
391
+ # residual connection
392
+ hidden_states = attn_output + residual
393
+
394
+ if encoder_hidden_states is not None:
395
+ # add one self-attention block for cross-attention
396
+ if not hasattr(self, "crossattention"):
397
+ raise ValueError(
398
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
399
+ "cross-attention layers by setting `config.add_cross_attention=True`"
400
+ )
401
+ residual = hidden_states
402
+ hidden_states = self.ln_cross_attn(hidden_states)
403
+ cross_attn_outputs = self.crossattention(
404
+ hidden_states,
405
+ attention_mask=attention_mask,
406
+ head_mask=head_mask,
407
+ encoder_hidden_states=encoder_hidden_states,
408
+ encoder_attention_mask=encoder_attention_mask,
409
+ output_attentions=output_attentions,
410
+ )
411
+ attn_output = cross_attn_outputs[0]
412
+ # residual connection
413
+ hidden_states = residual + attn_output
414
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
415
+
416
+ residual = hidden_states
417
+ hidden_states = self.ln_2(hidden_states)
418
+ feed_forward_hidden_states = self.mlp(hidden_states)
419
+ # residual connection
420
+ hidden_states = residual + feed_forward_hidden_states
421
+
422
+ if use_cache:
423
+ outputs = (hidden_states,) + outputs
424
+ else:
425
+ outputs = (hidden_states,) + outputs[1:]
426
+
427
+ return outputs # hidden_states, present, (attentions, cross_attentions)
428
+
429
+
430
+ class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
431
+ """
432
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
433
+ models.
434
+ """
435
+
436
+ config_class = DecisionTransformerConfig
437
+ load_tf_weights = load_tf_weights_in_gpt2
438
+ base_model_prefix = "transformer"
439
+ is_parallelizable = True
440
+ supports_gradient_checkpointing = True
441
+
442
+ def __init__(self, *inputs, **kwargs):
443
+ super().__init__(*inputs, **kwargs)
444
+
445
+ def _init_weights(self, module):
446
+ """Initialize the weights."""
447
+ if isinstance(module, (nn.Linear, Conv1D)):
448
+ # Slightly different from the TF version which uses truncated_normal for initialization
449
+ # cf https://github.com/pytorch/pytorch/pull/5617
450
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
451
+ if module.bias is not None:
452
+ module.bias.data.zero_()
453
+ elif isinstance(module, nn.Embedding):
454
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
455
+ if module.padding_idx is not None:
456
+ module.weight.data[module.padding_idx].zero_()
457
+ elif isinstance(module, nn.LayerNorm):
458
+ module.bias.data.zero_()
459
+ module.weight.data.fill_(1.0)
460
+
461
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
462
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
463
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
464
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
465
+ #
466
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
467
+ for name, p in module.named_parameters():
468
+ if "c_proj" in name and "weight" in name:
469
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
470
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
471
+
472
+
473
+ class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
474
+ def __init__(self, config):
475
+ super().__init__(config)
476
+
477
+ self.embed_dim = config.hidden_size
478
+
479
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
480
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
481
+
482
+ self.drop = nn.Dropout(config.embd_pdrop)
483
+ self.h = nn.ModuleList(
484
+ [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
485
+ )
486
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
487
+
488
+ # Model parallel
489
+ self.model_parallel = False
490
+ self.device_map = None
491
+ self.gradient_checkpointing = False
492
+
493
+ # Initialize weights and apply final processing
494
+ self.post_init()
495
+
496
+ def get_input_embeddings(self):
497
+ return self.wte
498
+
499
+ def set_input_embeddings(self, new_embeddings):
500
+ self.wte = new_embeddings
501
+
502
+ def forward(
503
+ self,
504
+ input_ids: Optional[torch.LongTensor] = None,
505
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
506
+ attention_mask: Optional[torch.FloatTensor] = None,
507
+ token_type_ids: Optional[torch.LongTensor] = None,
508
+ position_ids: Optional[torch.LongTensor] = None,
509
+ head_mask: Optional[torch.FloatTensor] = None,
510
+ inputs_embeds: Optional[torch.FloatTensor] = None,
511
+ encoder_hidden_states: Optional[torch.Tensor] = None,
512
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
513
+ use_cache: Optional[bool] = None,
514
+ output_attentions: Optional[bool] = None,
515
+ output_hidden_states: Optional[bool] = None,
516
+ return_dict: Optional[bool] = None,
517
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
518
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
519
+ output_hidden_states = (
520
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
521
+ )
522
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
523
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
524
+
525
+ if input_ids is not None and inputs_embeds is not None:
526
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
527
+ elif input_ids is not None:
528
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
529
+ input_shape = input_ids.size()
530
+ input_ids = input_ids.view(-1, input_shape[-1])
531
+ batch_size = input_ids.shape[0]
532
+ elif inputs_embeds is not None:
533
+ input_shape = inputs_embeds.size()[:-1]
534
+ batch_size = inputs_embeds.shape[0]
535
+ else:
536
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
537
+
538
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
539
+
540
+ if token_type_ids is not None:
541
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
542
+
543
+ if past_key_values is None:
544
+ past_length = 0
545
+ past_key_values = tuple([None] * len(self.h))
546
+ else:
547
+ past_length = past_key_values[0][0].size(-2)
548
+ if position_ids is None:
549
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
550
+ position_ids = position_ids.unsqueeze(0)
551
+
552
+ # Attention mask.
553
+ if attention_mask is not None:
554
+ if batch_size <= 0:
555
+ raise ValueError("batch_size has to be defined and > 0")
556
+ attention_mask = attention_mask.view(batch_size, -1)
557
+ # We create a 3D attention mask from a 2D tensor mask.
558
+ # Sizes are [batch_size, 1, 1, to_seq_length]
559
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
560
+ # this attention mask is more simple than the triangular masking of causal attention
561
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
562
+ attention_mask = attention_mask[:, None, None, :]
563
+
564
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
565
+ # masked positions, this operation will create a tensor which is 0.0 for
566
+ # positions we want to attend and the dtype's smallest value for masked positions.
567
+ # Since we are adding it to the raw scores before the softmax, this is
568
+ # effectively the same as removing these entirely.
569
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
570
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
571
+
572
+ # If a 2D or 3D attention mask is provided for the cross-attention
573
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
574
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
575
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
576
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
577
+ if encoder_attention_mask is None:
578
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
579
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
580
+ else:
581
+ encoder_attention_mask = None
582
+
583
+ # Prepare head mask if needed
584
+ # 1.0 in head_mask indicate we keep the head
585
+ # attention_probs has shape bsz x n_heads x N x N
586
+ # head_mask has shape n_layer x batch x n_heads x N x N
587
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
588
+
589
+ if inputs_embeds is None:
590
+ inputs_embeds = self.wte(input_ids)
591
+ position_embeds = self.wpe(position_ids)
592
+ hidden_states = inputs_embeds + position_embeds
593
+
594
+ if token_type_ids is not None:
595
+ token_type_embeds = self.wte(token_type_ids)
596
+ hidden_states = hidden_states + token_type_embeds
597
+
598
+ hidden_states = self.drop(hidden_states)
599
+
600
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
601
+
602
+ if self.gradient_checkpointing and self.training:
603
+ if use_cache:
604
+ logger.warning_once(
605
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
606
+ )
607
+ use_cache = False
608
+
609
+ presents = () if use_cache else None
610
+ all_self_attentions = () if output_attentions else None
611
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
612
+ all_hidden_states = () if output_hidden_states else None
613
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
614
+ # Model parallel
615
+ if self.model_parallel:
616
+ torch.cuda.set_device(hidden_states.device)
617
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
618
+ if layer_past is not None:
619
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
620
+ # Ensure that attention_mask is always on the same device as hidden_states
621
+ if attention_mask is not None:
622
+ attention_mask = attention_mask.to(hidden_states.device)
623
+ if isinstance(head_mask, torch.Tensor):
624
+ head_mask = head_mask.to(hidden_states.device)
625
+ if output_hidden_states:
626
+ all_hidden_states = all_hidden_states + (hidden_states,)
627
+
628
+ if self.gradient_checkpointing and self.training:
629
+ outputs = self._gradient_checkpointing_func(
630
+ block.__call__,
631
+ hidden_states,
632
+ None,
633
+ attention_mask,
634
+ head_mask[i],
635
+ encoder_hidden_states,
636
+ encoder_attention_mask,
637
+ use_cache,
638
+ output_attentions,
639
+ )
640
+ else:
641
+ outputs = block(
642
+ hidden_states,
643
+ layer_past=layer_past,
644
+ attention_mask=attention_mask,
645
+ head_mask=head_mask[i],
646
+ encoder_hidden_states=encoder_hidden_states,
647
+ encoder_attention_mask=encoder_attention_mask,
648
+ use_cache=use_cache,
649
+ output_attentions=output_attentions,
650
+ )
651
+
652
+ hidden_states = outputs[0]
653
+ if use_cache is True:
654
+ presents = presents + (outputs[1],)
655
+
656
+ if output_attentions:
657
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
658
+ if self.config.add_cross_attention:
659
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
660
+
661
+ # Model Parallel: If it's the last layer for that device, put things on the next device
662
+ if self.model_parallel:
663
+ for k, v in self.device_map.items():
664
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
665
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
666
+
667
+ hidden_states = self.ln_f(hidden_states)
668
+
669
+ hidden_states = hidden_states.view(output_shape)
670
+ # Add last hidden state
671
+ if output_hidden_states:
672
+ all_hidden_states = all_hidden_states + (hidden_states,)
673
+
674
+ if not return_dict:
675
+ return tuple(
676
+ v
677
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
678
+ if v is not None
679
+ )
680
+
681
+ return BaseModelOutputWithPastAndCrossAttentions(
682
+ last_hidden_state=hidden_states,
683
+ past_key_values=presents,
684
+ hidden_states=all_hidden_states,
685
+ attentions=all_self_attentions,
686
+ cross_attentions=all_cross_attentions,
687
+ )
688
+
689
+
690
+ @dataclass
691
+ class DecisionTransformerOutput(ModelOutput):
692
+ """
693
+ Base class for model's outputs that also contains a pooling of the last hidden states.
694
+
695
+ Args:
696
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
697
+ Sequence of hidden-states at the output of the last layer of the model.
698
+ state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
699
+ Environment state predictions
700
+ action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
701
+ Model action predictions
702
+ return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
703
+ Predicted returns for each state
704
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
705
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
706
+ shape `(batch_size, sequence_length, hidden_size)`.
707
+
708
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
709
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
710
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
711
+ sequence_length)`.
712
+
713
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
714
+ heads.
715
+ """
716
+
717
+ state_preds: torch.FloatTensor = None
718
+ action_preds: torch.FloatTensor = None
719
+ return_preds: torch.FloatTensor = None
720
+ hidden_states: torch.FloatTensor = None
721
+ attentions: torch.FloatTensor = None
722
+ last_hidden_state: torch.FloatTensor = None
723
+
724
+
725
+ class DecisionTransformerPreTrainedModel(PreTrainedModel):
726
+ """
727
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
728
+ models.
729
+ """
730
+
731
+ config_class = DecisionTransformerConfig
732
+ base_model_prefix = "decision_transformer"
733
+ main_input_name = "states"
734
+ supports_gradient_checkpointing = False
735
+
736
+ def _init_weights(self, module):
737
+ """Initialize the weights"""
738
+ if isinstance(module, nn.Linear):
739
+ # Slightly different from the TF version which uses truncated_normal for initialization
740
+ # cf https://github.com/pytorch/pytorch/pull/5617
741
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
742
+ if module.bias is not None:
743
+ module.bias.data.zero_()
744
+ elif isinstance(module, nn.Embedding):
745
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
746
+ if module.padding_idx is not None:
747
+ module.weight.data[module.padding_idx].zero_()
748
+ elif isinstance(module, nn.LayerNorm):
749
+ module.bias.data.zero_()
750
+ module.weight.data.fill_(1.0)
751
+
752
+
753
+ DECISION_TRANSFORMER_START_DOCSTRING = r"""
754
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
755
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
756
+ behavior.
757
+
758
+ Parameters:
759
+ config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
760
+ Initializing with a config file does not load the weights associated with the model, only the
761
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
762
+ """
763
+
764
+ DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
765
+ Args:
766
+ states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
767
+ The states for each step in the trajectory
768
+ actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
769
+ The actions taken by the "expert" policy for the current state, these are masked for auto regressive
770
+ prediction
771
+ rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
772
+ The rewards for each state, action
773
+ returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
774
+ The returns-to-go for each state in the trajectory
775
+ timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
776
+ The timestep for each step in the trajectory
777
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, episode_length)`):
778
+ Masking, used to mask the actions when performing autoregressive prediction
779
+ """
780
+
781
+
782
+ @add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
783
+ class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
784
+ """
785
+
786
+ The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
787
+ setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
788
+
789
+ """
790
+
791
+ def __init__(self, config):
792
+ super().__init__(config)
793
+ self.config = config
794
+ self.hidden_size = config.hidden_size
795
+ # note: the only difference between this GPT2Model and the default Huggingface version
796
+ # is that the positional embeddings are removed (since we'll add those ourselves)
797
+ self.encoder = DecisionTransformerGPT2Model(config)
798
+
799
+ self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
800
+ self.embed_return = torch.nn.Linear(1, config.hidden_size)
801
+ self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
802
+ self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
803
+
804
+ self.embed_ln = nn.LayerNorm(config.hidden_size)
805
+
806
+ # note: the state and return predictions are not used in the paper
807
+ self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
808
+ self.predict_action = nn.Sequential(
809
+ *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
810
+ )
811
+ self.predict_return = torch.nn.Linear(config.hidden_size, 1)
812
+
813
+ # Initialize weights and apply final processing
814
+ self.post_init()
815
+
816
+ @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
817
+ @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
818
+ def forward(
819
+ self,
820
+ states: Optional[torch.FloatTensor] = None,
821
+ actions: Optional[torch.FloatTensor] = None,
822
+ rewards: Optional[torch.FloatTensor] = None,
823
+ returns_to_go: Optional[torch.FloatTensor] = None,
824
+ timesteps: Optional[torch.LongTensor] = None,
825
+ attention_mask: Optional[torch.FloatTensor] = None,
826
+ output_hidden_states: Optional[bool] = None,
827
+ output_attentions: Optional[bool] = None,
828
+ return_dict: Optional[bool] = None,
829
+ ) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
830
+ r"""
831
+ Returns:
832
+
833
+ Examples:
834
+
835
+ ```python
836
+ >>> from transformers import DecisionTransformerModel
837
+ >>> import torch
+ >>> import gym  # needed for the environment below; not installed with transformers
838
+
839
+ >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
840
+ >>> # evaluation
841
+ >>> device = "cpu"  # placeholder; any torch device works
+ >>> model = model.to(device)
842
+ >>> model.eval()
843
+
844
+ >>> env = gym.make("Hopper-v3")
845
+ >>> state_dim = env.observation_space.shape[0]
846
+ >>> act_dim = env.action_space.shape[0]
847
+
848
+ >>> state = env.reset()
849
+ >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
850
+ >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
851
+ >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
852
+ >>> TARGET_RETURN = 3600.0  # illustrative target return; tune per environment
+ >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
853
+ >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
854
+ >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
855
+
856
+ >>> # forward pass
857
+ >>> with torch.no_grad():
858
+ ... state_preds, action_preds, return_preds = model(
859
+ ... states=states,
860
+ ... actions=actions,
861
+ ... rewards=rewards,
862
+ ... returns_to_go=target_return,
863
+ ... timesteps=timesteps,
864
+ ... attention_mask=attention_mask,
865
+ ... return_dict=False,
866
+ ... )
867
+ ```"""
868
+
869
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
870
+ output_hidden_states = (
871
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
872
+ )
873
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
874
+
875
+ batch_size, seq_length = states.shape[0], states.shape[1]
876
+
877
+ if attention_mask is None:
878
+ # attention mask for GPT: 1 for positions that can be attended to, 0 for masked positions
879
+ attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long, device=states.device)
880
+
881
+ # embed each modality with a different head
882
+ state_embeddings = self.embed_state(states)
883
+ action_embeddings = self.embed_action(actions)
884
+ returns_embeddings = self.embed_return(returns_to_go)
885
+ time_embeddings = self.embed_timestep(timesteps)
886
+
887
+ # time embeddings are treated similar to positional embeddings
888
+ state_embeddings = state_embeddings + time_embeddings
889
+ action_embeddings = action_embeddings + time_embeddings
890
+ returns_embeddings = returns_embeddings + time_embeddings
891
+
892
+ # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
893
+ # which works nicely in an autoregressive sense since states predict actions
894
+ stacked_inputs = (
895
+ torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
896
+ .permute(0, 2, 1, 3)
897
+ .reshape(batch_size, 3 * seq_length, self.hidden_size)
898
+ )
899
+ stacked_inputs = self.embed_ln(stacked_inputs)
900
+
901
+ # to make the attention mask fit the stacked inputs, have to stack it as well
902
+ stacked_attention_mask = (
903
+ torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
904
+ .permute(0, 2, 1)
905
+ .reshape(batch_size, 3 * seq_length)
906
+ )
907
+ device = stacked_inputs.device
908
+ # we feed in the input embeddings (not word indices as in NLP) to the model
909
+ encoder_outputs = self.encoder(
910
+ inputs_embeds=stacked_inputs,
911
+ attention_mask=stacked_attention_mask,
912
+ position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
913
+ output_attentions=output_attentions,
914
+ output_hidden_states=output_hidden_states,
915
+ return_dict=return_dict,
916
+ )
917
+ x = encoder_outputs[0]
918
+
919
+ # reshape x so that the second dimension corresponds to the original
920
+ # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
921
+ x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
922
+
923
+ # get predictions
924
+ return_preds = self.predict_return(x[:, 2]) # predict next return given state and action
925
+ state_preds = self.predict_state(x[:, 2]) # predict next state given state and action
926
+ action_preds = self.predict_action(x[:, 1]) # predict next action given state
927
+ if not return_dict:
928
+ return (state_preds, action_preds, return_preds)
929
+
930
+ return DecisionTransformerOutput(
931
+ last_hidden_state=encoder_outputs.last_hidden_state,
932
+ state_preds=state_preds,
933
+ action_preds=action_preds,
934
+ return_preds=return_preds,
935
+ hidden_states=encoder_outputs.hidden_states,
936
+ attentions=encoder_outputs.attentions,
937
+ )
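
The stack/permute/reshape trick used in `forward` above to interleave return, state and action tokens can be checked in isolation; the following sketch uses constant tensors as stand-ins for the three embeddings (names and sizes are illustrative, not part of the file):

    import torch

    batch_size, seq_length, hidden_size = 2, 4, 8
    returns_embeddings = torch.full((batch_size, seq_length, hidden_size), 0.0)
    state_embeddings = torch.full((batch_size, seq_length, hidden_size), 1.0)
    action_embeddings = torch.full((batch_size, seq_length, hidden_size), 2.0)

    stacked_inputs = (
        torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)  # (B, 3, T, H)
        .permute(0, 2, 1, 3)                                                           # (B, T, 3, H)
        .reshape(batch_size, 3 * seq_length, hidden_size)                              # (B, 3T, H)
    )
    # tokens now alternate R_t, s_t, a_t along the sequence dimension
    print(stacked_inputs[0, :6, 0])  # tensor([0., 1., 2., 0., 1., 2.])
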
venv/lib/python3.10/site-packages/transformers/models/git/__init__.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
22
+ "processing_git": ["GitProcessor"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_git"] = [
32
+ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "GitForCausalLM",
34
+ "GitModel",
35
+ "GitPreTrainedModel",
36
+ "GitVisionModel",
37
+ ]
38
+
39
+ if TYPE_CHECKING:
40
+ from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
41
+ from .processing_git import GitProcessor
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ from .modeling_git import (
50
+ GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
51
+ GitForCausalLM,
52
+ GitModel,
53
+ GitPreTrainedModel,
54
+ GitVisionModel,
55
+ )
56
+
57
+ else:
58
+ import sys
59
+
60
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
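
Because of the `_LazyModule` indirection above, the GIT classes are imported from the top-level `transformers` namespace as usual and only materialized on first access; a minimal check (assuming `torch` is installed, since `modeling_git` is gated on `is_torch_available()`):

    from transformers import GitConfig, GitForCausalLM  # resolved lazily on first attribute access

    config = GitConfig()
    model = GitForCausalLM(config)  # randomly initialized; no weights are downloaded
    print(type(model).__name__, config.model_type)  # GitForCausalLM git
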
venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (993 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc ADDED
Binary file (9.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc ADDED
Binary file (14.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc ADDED
Binary file (48.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc ADDED
Binary file (5.04 kB).
 
venv/lib/python3.10/site-packages/transformers/models/git/configuration_git.py ADDED
@@ -0,0 +1,240 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from typing import Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class GitVisionConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`GitVisionModel`]. It is used to instantiate a GIT
32
+ vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
33
+ with the defaults will yield a similar configuration to that of the vision encoder of the GIT
34
+ [microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ hidden_size (`int`, *optional*, defaults to 768):
41
+ Dimensionality of the encoder layers and the pooler layer.
42
+ intermediate_size (`int`, *optional*, defaults to 3072):
43
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
44
+ num_hidden_layers (`int`, *optional*, defaults to 12):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 12):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ image_size (`int`, *optional*, defaults to 224):
49
+ The size (resolution) of each image.
50
+ patch_size (`int`, *optional*, defaults to 16):
51
+ The size (resolution) of each patch.
52
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
55
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
56
+ The epsilon used by the layer normalization layers.
57
+ attention_dropout (`float`, *optional*, defaults to 0.0):
58
+ The dropout ratio for the attention probabilities.
59
+ initializer_range (`float`, *optional*, defaults to 0.02):
60
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
61
+
62
+ Example:
63
+
64
+ ```python
65
+ >>> from transformers import GitVisionConfig, GitVisionModel
66
+
67
+ >>> # Initializing a GitVisionConfig with microsoft/git-base style configuration
68
+ >>> configuration = GitVisionConfig()
69
+
70
+ >>> # Initializing a GitVisionModel (with random weights) from the microsoft/git-base style configuration
71
+ >>> model = GitVisionModel(configuration)
72
+
73
+ >>> # Accessing the model configuration
74
+ >>> configuration = model.config
75
+ ```"""
76
+
77
+ model_type = "git_vision_model"
78
+
79
+ def __init__(
80
+ self,
81
+ hidden_size=768,
82
+ intermediate_size=3072,
83
+ num_hidden_layers=12,
84
+ num_attention_heads=12,
85
+ num_channels=3,
86
+ image_size=224,
87
+ patch_size=16,
88
+ hidden_act="quick_gelu",
89
+ layer_norm_eps=1e-5,
90
+ attention_dropout=0.0,
91
+ initializer_range=0.02,
92
+ **kwargs,
93
+ ):
94
+ super().__init__(**kwargs)
95
+
96
+ self.hidden_size = hidden_size
97
+ self.intermediate_size = intermediate_size
98
+ self.num_hidden_layers = num_hidden_layers
99
+ self.num_attention_heads = num_attention_heads
100
+ self.num_channels = num_channels
101
+ self.patch_size = patch_size
102
+ self.image_size = image_size
103
+ self.initializer_range = initializer_range
104
+ self.attention_dropout = attention_dropout
105
+ self.layer_norm_eps = layer_norm_eps
106
+ self.hidden_act = hidden_act
107
+
108
+ @classmethod
109
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
110
+ cls._set_token_in_kwargs(kwargs)
111
+
112
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
113
+
114
+ # get the vision config dict if we are loading from GITConfig
115
+ if config_dict.get("model_type") == "git":
116
+ config_dict = config_dict["vision_config"]
117
+
118
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
119
+ logger.warning(
120
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
121
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
122
+ )
123
+
124
+ return cls.from_dict(config_dict, **kwargs)
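
The `model_type == "git"` branch above means the vision sub-configuration can be loaded directly from a full GIT checkpoint; a small sketch (requires network access; the checkpoint name is simply the one referenced in the docstrings):

    from transformers import GitVisionConfig

    vision_config = GitVisionConfig.from_pretrained("microsoft/git-base")
    print(vision_config.model_type)                             # git_vision_model
    print(vision_config.image_size, vision_config.patch_size)   # 224 16 for this checkpoint
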
125
+
126
+
127
+ class GitConfig(PretrainedConfig):
128
+ r"""
129
+ This is the configuration class to store the configuration of a [`GitModel`]. It is used to instantiate a GIT model
130
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
131
+ defaults will yield a similar configuration to that of the GIT
132
+ [microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.
133
+
134
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
135
+ documentation from [`PretrainedConfig`] for more information.
136
+
137
+ Args:
138
+ vision_config (`dict`, *optional*):
139
+ Dictionary of configuration options used to initialize [`GitVisionConfig`].
140
+ vocab_size (`int`, *optional*, defaults to 30522):
141
+ Vocabulary size of the GIT model. Defines the number of different tokens that can be represented by the
142
+ `input_ids` passed when calling [`GitModel`].
143
+ hidden_size (`int`, *optional*, defaults to 768):
144
+ Dimensionality of the encoder layers and the pooler layer.
145
+ num_hidden_layers (`int`, *optional*, defaults to 6):
146
+ Number of hidden layers in the Transformer encoder.
147
+ num_attention_heads (`int`, *optional*, defaults to 12):
148
+ Number of attention heads for each attention layer in the Transformer encoder.
149
+ intermediate_size (`int`, *optional*, defaults to 3072):
150
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
151
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
152
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
153
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
154
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
155
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
156
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
157
+ The dropout ratio for the attention probabilities.
158
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
159
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
160
+ just in case (e.g., 512 or 1024 or 2048).
161
+ initializer_range (`float`, *optional*, defaults to 0.02):
162
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
163
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
164
+ The epsilon used by the layer normalization layers.
165
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
166
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
167
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
168
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
169
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
170
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
171
+ use_cache (`bool`, *optional*, defaults to `True`):
172
+ Whether or not the model should return the last key/values attentions (not used by all models).
173
+ num_image_with_embedding (`int`, *optional*):
174
+ The number of temporal embeddings to add, in case the model is used for video captioning/VQA.
175
+
176
+ Examples:
177
+
178
+ ```python
179
+ >>> from transformers import GitConfig, GitModel
180
+
181
+ >>> # Initializing a GIT microsoft/git-base style configuration
182
+ >>> configuration = GitConfig()
183
+
184
+ >>> # Initializing a model (with random weights) from the microsoft/git-base style configuration
185
+ >>> model = GitModel(configuration)
186
+
187
+ >>> # Accessing the model configuration
188
+ >>> configuration = model.config
189
+ ```"""
190
+
191
+ model_type = "git"
192
+
193
+ def __init__(
194
+ self,
195
+ vision_config=None,
196
+ vocab_size=30522,
197
+ hidden_size=768,
198
+ num_hidden_layers=6,
199
+ num_attention_heads=12,
200
+ intermediate_size=3072,
201
+ hidden_act="gelu",
202
+ hidden_dropout_prob=0.1,
203
+ attention_probs_dropout_prob=0.1,
204
+ max_position_embeddings=1024,
205
+ initializer_range=0.02,
206
+ layer_norm_eps=1e-12,
207
+ pad_token_id=0,
208
+ position_embedding_type="absolute",
209
+ use_cache=True,
210
+ tie_word_embeddings=False,
211
+ bos_token_id=101,
212
+ eos_token_id=102,
213
+ num_image_with_embedding=None,
214
+ **kwargs,
215
+ ):
216
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
217
+
218
+ if vision_config is None:
219
+ vision_config = {}
220
+ logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
221
+
222
+ self.vision_config = GitVisionConfig(**vision_config)
223
+ self.vocab_size = vocab_size
224
+ self.hidden_size = hidden_size
225
+ self.num_hidden_layers = num_hidden_layers
226
+ self.num_attention_heads = num_attention_heads
227
+ self.hidden_act = hidden_act
228
+ self.intermediate_size = intermediate_size
229
+ self.hidden_dropout_prob = hidden_dropout_prob
230
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
231
+ self.max_position_embeddings = max_position_embeddings
232
+ self.initializer_range = initializer_range
233
+ self.layer_norm_eps = layer_norm_eps
234
+ self.position_embedding_type = position_embedding_type
235
+ self.use_cache = use_cache
236
+ self.tie_word_embeddings = tie_word_embeddings
237
+ self.num_image_with_embedding = num_image_with_embedding
238
+
239
+ self.bos_token_id = bos_token_id
240
+ self.eos_token_id = eos_token_id
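
A nested vision configuration can be passed as a plain dict, as handled in `__init__` above; the values below are arbitrary examples rather than recommended settings:

    from transformers import GitConfig

    config = GitConfig(
        vision_config={"image_size": 384, "patch_size": 16},  # overrides for the GitVisionConfig
        num_hidden_layers=6,
    )
    print(config.vision_config.image_size)  # 384
    print(config.num_hidden_layers)         # 6
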
venv/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py ADDED
@@ -0,0 +1,428 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert GIT checkpoints from the original repository.
16
+
17
+ URL: https://github.com/microsoft/GenerativeImage2Text/tree/main"""
18
+
19
+
20
+ import argparse
21
+ from pathlib import Path
22
+
23
+ import numpy as np
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import hf_hub_download
27
+ from PIL import Image
28
+ from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
29
+
30
+ from transformers import (
31
+ AutoTokenizer,
32
+ CLIPImageProcessor,
33
+ GitConfig,
34
+ GitForCausalLM,
35
+ GitProcessor,
36
+ GitVisionConfig,
37
+ VideoMAEImageProcessor,
38
+ )
39
+ from transformers.utils import logging
40
+
41
+
42
+ logging.set_verbosity_info()
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
+ def get_git_config(model_name):
47
+ if "base" in model_name and "vqa" in model_name:
48
+ image_size = 480
49
+ elif "large" in model_name and "vqa" in model_name:
50
+ image_size = 420
51
+ else:
52
+ image_size = 224
53
+
54
+ vision_config = GitVisionConfig(image_size=image_size)
55
+
56
+ if "large" in model_name:
57
+ vision_config.patch_size = 14
58
+ vision_config.hidden_size = 1024
59
+ vision_config.intermediate_size = 4096
60
+ vision_config.num_hidden_layers = 24
61
+ vision_config.num_attention_heads = 16
62
+
63
+ is_video = "vatex" in model_name or "msrvtt" in model_name
64
+ num_image_with_embedding = 6 if is_video else None
65
+ config = GitConfig(vision_config=vision_config.to_dict(), num_image_with_embedding=num_image_with_embedding)
66
+
67
+ return config, image_size, is_video
68
+
69
+
70
+ # here we list all keys to be renamed (original name on the left, our name on the right)
71
+ def create_rename_keys(config, prefix=""):
72
+ rename_keys = []
73
+
74
+ # image encoder
75
+ # fmt: off
76
+ rename_keys.append(
77
+ (f"{prefix}image_encoder.class_embedding", "git.image_encoder.vision_model.embeddings.class_embedding")
78
+ )
79
+ rename_keys.append(
80
+ (
81
+ f"{prefix}image_encoder.positional_embedding",
82
+ "git.image_encoder.vision_model.embeddings.position_embedding.weight",
83
+ )
84
+ )
85
+ rename_keys.append(
86
+ (f"{prefix}image_encoder.conv1.weight", "git.image_encoder.vision_model.embeddings.patch_embedding.weight")
87
+ )
88
+ rename_keys.append((f"{prefix}image_encoder.ln_pre.weight", "git.image_encoder.vision_model.pre_layrnorm.weight"))
89
+ rename_keys.append((f"{prefix}image_encoder.ln_pre.bias", "git.image_encoder.vision_model.pre_layrnorm.bias"))
90
+ rename_keys.append(
91
+ (f"{prefix}image_encoder.ln_post.weight", "git.image_encoder.vision_model.post_layernorm.weight")
92
+ )
93
+ rename_keys.append((f"{prefix}image_encoder.ln_post.bias", "git.image_encoder.vision_model.post_layernorm.bias"))
94
+ # fmt: on
95
+ rename_keys.append((f"{prefix}image_encoder.proj", "git.image_encoder.visual_projection.weight"))
96
+
97
+ # fmt: off
98
+ for i in range(config.vision_config.num_hidden_layers):
99
+ # image encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
100
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"))
101
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"))
102
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.weight"))
103
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.bias"))
104
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.weight"))
105
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.bias"))
106
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.weight"))
107
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.bias"))
108
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.weight"))
109
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.bias"))
110
+ # fmt: on
111
+
112
+ # text decoder
113
+ # fmt: off
114
+ rename_keys.append((f"{prefix}textual.embedding.words.weight", "git.embeddings.word_embeddings.weight"))
115
+ rename_keys.append((f"{prefix}textual.embedding.positions.weight", "git.embeddings.position_embeddings.weight"))
116
+ rename_keys.append((f"{prefix}textual.visual_projection.0.weight", "git.visual_projection.visual_projection.0.weight"))
117
+ rename_keys.append((f"{prefix}textual.visual_projection.0.bias", "git.visual_projection.visual_projection.0.bias"))
118
+ rename_keys.append((f"{prefix}textual.visual_projection.1.weight", "git.visual_projection.visual_projection.1.weight"))
119
+ rename_keys.append((f"{prefix}textual.visual_projection.1.bias", "git.visual_projection.visual_projection.1.bias"))
120
+
121
+ rename_keys.append((f"{prefix}textual.embedding.layer_norm.weight", "git.embeddings.LayerNorm.weight"))
122
+ rename_keys.append((f"{prefix}textual.embedding.layer_norm.bias", "git.embeddings.LayerNorm.bias"))
123
+ rename_keys.append((f"{prefix}textual.output.weight", "output.weight"))
124
+ rename_keys.append((f"{prefix}textual.output.bias", "output.bias"))
125
+ for i in range(config.num_hidden_layers):
126
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.weight", f"git.encoder.layer.{i}.attention.self.query.weight"))
127
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.bias", f"git.encoder.layer.{i}.attention.self.query.bias"))
128
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.weight", f"git.encoder.layer.{i}.attention.self.key.weight"))
129
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.bias", f"git.encoder.layer.{i}.attention.self.key.bias"))
130
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.weight", f"git.encoder.layer.{i}.attention.self.value.weight"))
131
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.bias", f"git.encoder.layer.{i}.attention.self.value.bias"))
132
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.weight", f"git.encoder.layer.{i}.attention.output.dense.weight"))
133
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.bias", f"git.encoder.layer.{i}.attention.output.dense.bias"))
134
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.weight", f"git.encoder.layer.{i}.attention.output.LayerNorm.weight"))
135
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.bias", f"git.encoder.layer.{i}.attention.output.LayerNorm.bias"))
136
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.weight", f"git.encoder.layer.{i}.intermediate.dense.weight"))
137
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.bias", f"git.encoder.layer.{i}.intermediate.dense.bias"))
138
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.weight", f"git.encoder.layer.{i}.output.dense.weight"))
139
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.bias", f"git.encoder.layer.{i}.output.dense.bias"))
140
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.weight", f"git.encoder.layer.{i}.output.LayerNorm.weight"))
141
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.bias", f"git.encoder.layer.{i}.output.LayerNorm.bias"))
142
+ # fmt: on
143
+
144
+ if config.num_image_with_embedding is not None:
145
+ rename_keys.append(("img_temperal_embedding.0", "git.img_temperal_embedding.0"))
146
+ rename_keys.append(("img_temperal_embedding.1", "git.img_temperal_embedding.1"))
147
+ rename_keys.append(("img_temperal_embedding.2", "git.img_temperal_embedding.2"))
148
+ rename_keys.append(("img_temperal_embedding.3", "git.img_temperal_embedding.3"))
149
+ rename_keys.append(("img_temperal_embedding.4", "git.img_temperal_embedding.4"))
150
+ rename_keys.append(("img_temperal_embedding.5", "git.img_temperal_embedding.5"))
151
+
152
+ return rename_keys
153
+
154
+
155
+ def rename_key(dct, old, new):
156
+ val = dct.pop(old)
157
+ dct[new] = val.T if "image_encoder.visual_projection" in new else val
158
+
159
+
160
+ # we split up the matrix of each CLIP encoder layer into queries, keys and values
161
+ def read_in_q_k_v(state_dict, config, prefix=""):
162
+ dim = config.vision_config.hidden_size
163
+ for i in range(config.vision_config.num_hidden_layers):
164
+ # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
165
+ in_proj_weight = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_weight")
166
+ in_proj_bias = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_bias")
167
+ # next, add query, keys and values (in that order) to the state dict
168
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[
169
+ :dim, :
170
+ ]
171
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:dim]
172
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
173
+ dim : dim * 2, :
174
+ ]
175
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[
176
+ dim : dim * 2
177
+ ]
178
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[
179
+ -dim:, :
180
+ ]
181
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-dim:]
182
+
183
+
184
+ # We will verify our results on an image
185
+ def prepare_img(model_name):
186
+ if "textvqa" in model_name:
187
+ filepath = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
188
+ image = Image.open(filepath).convert("RGB")
189
+ else:
190
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
191
+ image = Image.open(requests.get(url, stream=True).raw)
192
+
193
+ return image
194
+
195
+
196
+ def prepare_video():
197
+ from decord import VideoReader, cpu
198
+
199
+ # set seed for reproducibility
200
+ np.random.seed(0)
201
+
202
+ def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
203
+ """
204
+ Sample a given number of frame indices from the video.
205
+
206
+ Args:
207
+ clip_len (`int`): Total number of frames to sample.
208
+ frame_sample_rate (`int`): Sample every n-th frame.
209
+ seg_len (`int`): Maximum allowed index of sample's last frame.
210
+
211
+ Returns:
212
+ indices (`List[int]`): List of sampled frame indices
213
+ """
214
+ converted_len = int(clip_len * frame_sample_rate)
215
+ end_idx = np.random.randint(converted_len, seg_len)
216
+ start_idx = end_idx - converted_len
217
+ indices = np.linspace(start_idx, end_idx, num=clip_len)
218
+ indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
219
+ return indices
220
+
221
+ # video clip consists of 300 frames (10 seconds at 30 FPS)
222
+ file_path = hf_hub_download(repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset")
223
+ videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0))
224
+
225
+ # sample 6 frames
226
+ videoreader.seek(0)
227
+ indices = sample_frame_indices(clip_len=6, frame_sample_rate=4, seg_len=len(videoreader))
228
+ video = videoreader.get_batch(indices).asnumpy()
229
+
230
+ return video
231
+
232
+
233
+ @torch.no_grad()
234
+ def convert_git_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
235
+ """
236
+ Copy/paste/tweak model's weights to our GIT structure.
237
+ """
238
+
239
+ model_name_to_url = {
240
+ "git-base": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE/snapshot/model.pt",
241
+ "git-base-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_COCO/snapshot/model.pt",
242
+ "git-base-textcaps": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTCAPS/snapshot/model.pt",
243
+ "git-base-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VQAv2/snapshot/model.pt",
244
+ "git-base-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTVQA/snapshot/model.pt", # todo
245
+ "git-base-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VATEX/snapshot/model.pt",
246
+ "git-base-msrvtt-qa": (
247
+ "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_MSRVTT_QA/snapshot/model.pt"
248
+ ),
249
+ "git-large": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE/snapshot/model.pt",
250
+ "git-large-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_COCO/snapshot/model.pt",
251
+ "git-large-textcaps": (
252
+ "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTCAPS/snapshot/model.pt"
253
+ ),
254
+ "git-large-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VQAv2/snapshot/model.pt",
255
+ "git-large-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTVQA/snapshot/model.pt",
256
+ "git-large-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VATEX/snapshot/model.pt",
257
+ "git-large-msrvtt-qa": (
258
+ "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_MSRVTT_QA/snapshot/model.pt"
259
+ ),
260
+ "git-large-r": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R/snapshot/model.pt",
261
+ "git-large-r-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_COCO/snapshot/model.pt",
262
+ "git-large-r-textcaps": (
263
+ "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_TEXTCAPS/snapshot/model.pt"
264
+ ),
265
+ }
266
+
267
+ model_name_to_path = {
268
+ "git-large": "/Users/nielsrogge/Documents/GIT/git_large_model.pt",
269
+ "git-large-coco": "/Users/nielsrogge/Documents/GIT/git_large_coco_model.pt",
270
+ "git-large-textcaps": "/Users/nielsrogge/Documents/GIT/git_large_textcaps_model.pt",
271
+ "git-large-vqav2": "/Users/nielsrogge/Documents/GIT/git_large_vqav2_model.pt",
272
+ "git-large-textvqa": "/Users/nielsrogge/Documents/GIT/git_large_textvqa_model.pt",
273
+ }
274
+
275
+ # define GIT configuration based on model name
276
+ config, image_size, is_video = get_git_config(model_name)
277
+ if "large" in model_name and not is_video and "large-r" not in model_name:
278
+ # large checkpoints take way too long to download
279
+ checkpoint_path = model_name_to_path[model_name]
280
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
281
+ else:
282
+ checkpoint_url = model_name_to_url[model_name]
283
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
284
+ "model"
285
+ ]
286
+ # rename keys
287
+ prefix = "module." if model_name == "git-base" else ""
288
+ rename_keys = create_rename_keys(config, prefix=prefix)
289
+ for src, dest in rename_keys:
290
+ rename_key(state_dict, src, dest)
291
+ read_in_q_k_v(state_dict, config, prefix=prefix)
292
+
293
+ # load HuggingFace model
294
+ model = GitForCausalLM(config)
295
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
296
+ model.eval()
297
+
298
+ print("Missing keys:", missing_keys)
299
+ print("Unexpected keys:", unexpected_keys)
300
+
301
+ assert missing_keys == ["git.embeddings.position_ids", "git.image_encoder.vision_model.embeddings.position_ids"]
302
+ assert unexpected_keys == ["git.image_encoder.visual_projection.weight"]
303
+
304
+ # verify results
305
+ image_processor = (
306
+ VideoMAEImageProcessor(
307
+ size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size}
308
+ )
309
+ if is_video
310
+ else CLIPImageProcessor(
311
+ size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size}
312
+ )
313
+ )
314
+ tokenizer = AutoTokenizer.from_pretrained(
315
+ "google-bert/bert-base-uncased", model_input_names=["input_ids", "attention_mask"]
316
+ )
317
+ processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
318
+
319
+ if is_video:
320
+ video = prepare_video()
321
+ pixel_values = processor(images=list(video), return_tensors="pt").pixel_values
322
+ else:
323
+ image = prepare_img(model_name)
324
+ image_transforms = Compose(
325
+ [
326
+ Resize(image_size, interpolation=Image.BICUBIC),
327
+ CenterCrop(image_size),
328
+ ToTensor(),
329
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
330
+ ]
331
+ )
332
+ original_pixel_values = image_transforms(image).unsqueeze(0)
333
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values
334
+
335
+ assert torch.allclose(pixel_values, original_pixel_values)
336
+
337
+ input_ids = torch.tensor([[101]])
338
+ outputs = model(input_ids, pixel_values=pixel_values)
339
+ logits = outputs.logits
340
+ print("Logits:", logits[0, -1, :3])
341
+
342
+ if model_name == "git-base":
343
+ expected_slice_logits = torch.tensor([-1.2832, -1.2835, -1.2840])
344
+ elif model_name == "git-base-coco":
345
+ expected_slice_logits = torch.tensor([-0.9925, -0.9930, -0.9935])
346
+ elif model_name == "git-base-textcaps":
347
+ expected_slice_logits = torch.tensor([-1.2980, -1.2983, -1.2985])
348
+ elif model_name == "git-base-vqav2":
349
+ expected_slice_logits = torch.tensor([-0.8570, -0.8568, -0.8561])
350
+ elif model_name == "git-base-textvqa":
351
+ expected_slice_logits = torch.tensor([-1.4085, -1.4083, -1.4082])
352
+ elif model_name == "git-base-vatex":
353
+ expected_slice_logits = torch.tensor([-1.3451, -1.3447, -1.3447])
354
+ elif model_name == "git-base-msrvtt-qa":
355
+ expected_slice_logits = torch.tensor([-0.8554, -0.8550, -0.8540])
356
+ elif model_name == "git-large":
357
+ expected_slice_logits = torch.tensor([-1.1708, -1.1707, -1.1705])
358
+ elif model_name == "git-large-coco":
359
+ expected_slice_logits = torch.tensor([-1.0425, -1.0423, -1.0422])
360
+ elif model_name == "git-large-textcaps":
361
+ expected_slice_logits = torch.tensor([-1.2705, -1.2708, -1.2706])
362
+ elif model_name == "git-large-vqav2":
363
+ expected_slice_logits = torch.tensor([-0.7042, -0.7043, -0.7043])
364
+ elif model_name == "git-large-textvqa":
365
+ expected_slice_logits = torch.tensor([-0.8590, -0.8592, -0.8590])
366
+ elif model_name == "git-large-vatex":
367
+ expected_slice_logits = torch.tensor([-1.0113, -1.0114, -1.0113])
368
+ elif model_name == "git-large-msrvtt-qa":
369
+ expected_slice_logits = torch.tensor([0.0130, 0.0134, 0.0131])
370
+ elif model_name == "git-large-r":
371
+ expected_slice_logits = torch.tensor([-1.1283, -1.1285, -1.1286])
372
+ elif model_name == "git-large-r-coco":
373
+ expected_slice_logits = torch.tensor([-0.9641, -0.9641, -0.9641])
374
+ elif model_name == "git-large-r-textcaps":
375
+ expected_slice_logits = torch.tensor([-1.1121, -1.1120, -1.1124])
376
+
377
+ assert torch.allclose(logits[0, -1, :3], expected_slice_logits, atol=1e-4)
378
+ print("Looks ok!")
379
+
380
+ prompt = ""
381
+ if "textvqa" in model_name:
382
+ prompt = "what does the front of the bus say at the top?"
383
+ elif "msrvtt-qa" in model_name:
384
+ prompt = "what does the woman eat?"
385
+ elif "vqa" in model_name:
386
+ prompt = "what are the cats doing?"
387
+ input_ids = tokenizer(prompt, add_special_tokens=False).input_ids
388
+ input_ids = [processor.tokenizer.cls_token_id] + input_ids
389
+ input_ids = torch.tensor(input_ids).unsqueeze(0)
390
+ print("Generating caption...")
391
+ generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
392
+ print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
393
+
394
+ if pytorch_dump_folder_path is not None:
395
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
396
+ print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
397
+ model.save_pretrained(pytorch_dump_folder_path)
398
+ processor.save_pretrained(pytorch_dump_folder_path)
399
+
400
+ if push_to_hub:
401
+ print(f"Pushing model and processor of {model_name} to the hub...")
402
+ model.push_to_hub(f"microsoft/{model_name}")
403
+ processor.push_to_hub(f"microsoft/{model_name}")
404
+
405
+
406
+ if __name__ == "__main__":
407
+ parser = argparse.ArgumentParser()
408
+ # Required parameters
409
+ parser.add_argument(
410
+ "--model_name",
411
+ default="git-base",
412
+ type=str,
413
+ help="Name of the model you'd like to convert.",
414
+ )
415
+ parser.add_argument(
416
+ "--pytorch_dump_folder_path",
417
+ default=None,
418
+ type=str,
419
+ help="Path to the output PyTorch model directory.",
420
+ )
421
+ parser.add_argument(
422
+ "--push_to_hub",
423
+ action="store_true",
424
+ help="Whether to push the model to the hub.",
425
+ )
426
+
427
+ args = parser.parse_args()
428
+ convert_git_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
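
The script above is normally driven through its `argparse` entry point; the same conversion can be invoked programmatically as sketched below (the output directory is a placeholder, and the run needs `torchvision` plus network access to fetch the original checkpoint):

    from transformers.models.git.convert_git_to_pytorch import convert_git_checkpoint

    convert_git_checkpoint(
        model_name="git-base",
        pytorch_dump_folder_path="./git-base-converted",  # placeholder output directory
        push_to_hub=False,
    )
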
venv/lib/python3.10/site-packages/transformers/models/git/modeling_git.py ADDED
@@ -0,0 +1,1543 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
3
+ # All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch GIT model."""
17
+
18
+
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...file_utils import ModelOutput
30
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
31
+ from ...modeling_outputs import (
32
+ BaseModelOutput,
33
+ BaseModelOutputWithPast,
34
+ BaseModelOutputWithPooling,
35
+ CausalLMOutputWithPast,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
39
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
40
+ from .configuration_git import GitConfig, GitVisionConfig
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ _CHECKPOINT_FOR_DOC = "microsoft/git-base"
46
+ _CONFIG_FOR_DOC = "GitConfig"
47
+
48
+
49
+ from ..deprecated._archive_maps import GIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
50
+
51
+
52
+ @dataclass
53
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git
54
+ class GitVisionModelOutput(ModelOutput):
55
+ """
56
+ Base class for vision model outputs that also contain image embeddings (a projection of the pooled last hidden states).
57
+
58
+ Args:
59
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
60
+ The image embeddings obtained by applying the projection layer to the pooler_output.
61
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
62
+ Sequence of hidden-states at the output of the last layer of the model.
63
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
64
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
65
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
66
+
67
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
68
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
69
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
70
+ sequence_length)`.
71
+
72
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
73
+ heads.
74
+ """
75
+
76
+ image_embeds: Optional[torch.FloatTensor] = None
77
+ last_hidden_state: torch.FloatTensor = None
78
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
79
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
80
+
81
+
82
+ class GitEmbeddings(nn.Module):
83
+ """Construct the embeddings from word and position embeddings."""
84
+
85
+ def __init__(self, config):
86
+ super().__init__()
87
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
88
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
89
+
90
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
91
+ # any TensorFlow checkpoint file
92
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
93
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
94
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
95
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
96
+ self.register_buffer(
97
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
98
+ )
99
+
100
+ def forward(
101
+ self,
102
+ input_ids: Optional[torch.LongTensor] = None,
103
+ position_ids: Optional[torch.LongTensor] = None,
104
+ inputs_embeds: Optional[torch.FloatTensor] = None,
105
+ past_key_values_length: int = 0,
106
+ ) -> torch.Tensor:
107
+ if input_ids is not None:
108
+ input_shape = input_ids.size()
109
+ else:
110
+ input_shape = inputs_embeds.size()[:-1]
111
+
112
+ seq_length = input_shape[1]
113
+
114
+ if position_ids is None:
115
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
116
+
117
+ if inputs_embeds is None:
118
+ embeddings = self.word_embeddings(input_ids)
119
+ else:
120
+ embeddings = inputs_embeds
121
+
122
+ if self.position_embedding_type == "absolute":
123
+ position_embeddings = self.position_embeddings(position_ids)
124
+ embeddings += position_embeddings
125
+ embeddings = self.LayerNorm(embeddings)
126
+ embeddings = self.dropout(embeddings)
127
+ return embeddings
128
+
129
+
130
+ class GitSelfAttention(nn.Module):
131
+ def __init__(self, config, position_embedding_type=None):
132
+ super().__init__()
133
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
134
+ raise ValueError(
135
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
136
+ f"heads ({config.num_attention_heads})"
137
+ )
138
+
139
+ self.num_attention_heads = config.num_attention_heads
140
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
141
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
142
+ self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
143
+ if config.num_image_with_embedding is not None:
144
+ self.image_patch_tokens *= config.num_image_with_embedding
145
+
146
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
147
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
148
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
149
+
150
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
151
+ self.position_embedding_type = position_embedding_type or getattr(
152
+ config, "position_embedding_type", "absolute"
153
+ )
154
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
155
+ self.max_position_embeddings = config.max_position_embeddings
156
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
157
+
158
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
159
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
160
+ x = x.view(new_x_shape)
161
+ return x.permute(0, 2, 1, 3)
162
+
163
+ def forward(
164
+ self,
165
+ hidden_states: torch.Tensor,
166
+ attention_mask: Optional[torch.FloatTensor] = None,
167
+ head_mask: Optional[torch.FloatTensor] = None,
168
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
169
+ output_attentions: Optional[bool] = False,
170
+ pixel_values_present: Optional[bool] = False,
171
+ ) -> Tuple[torch.Tensor]:
172
+ mixed_query_layer = self.query(hidden_states)
173
+
174
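+ # The projected image patch tokens are prepended to the text embeddings in GitModel, so
+ # `cutoff` marks where the text part of the sequence starts whenever pixel values are present.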
+ cutoff = self.image_patch_tokens if pixel_values_present else 0
175
+ if past_key_value is not None:
176
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
177
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
178
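+ # The cache holds only the text keys/values, so splice it between the freshly computed
+ # image keys/values (`:cutoff`) and the projection of the current token (`-1:`).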
+ key_layer = torch.cat([key_layer[:, :, :cutoff, :], past_key_value[0], key_layer[:, :, -1:, :]], dim=2)
179
+ value_layer = torch.cat(
180
+ [value_layer[:, :, :cutoff, :], past_key_value[1], value_layer[:, :, -1:, :]], dim=2
181
+ )
182
+ else:
183
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
184
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
185
+
186
+ query_layer = self.transpose_for_scores(mixed_query_layer)
187
+
188
+ use_cache = past_key_value is not None
189
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
190
+ # Further calls to cross_attention layer can then reuse all cross-attention
191
+ # key/value_states (first "if" case)
192
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
193
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
194
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
195
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
196
+ # NOTE: like in other caches, we store the text component. In GIT it means we discard the image component.
197
+ past_key_value = (
198
+ key_layer[:, :, cutoff:, :],
199
+ value_layer[:, :, cutoff:, :],
200
+ )
201
+
202
+ # Take the dot product between "query" and "key" to get the raw attention scores.
203
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
204
+
205
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
206
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
207
+ if use_cache:
208
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
209
+ -1, 1
210
+ )
211
+ else:
212
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
213
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
214
+ distance = position_ids_l - position_ids_r
215
+
216
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
217
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
218
+
219
+ if self.position_embedding_type == "relative_key":
220
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
221
+ attention_scores = attention_scores + relative_position_scores
222
+ elif self.position_embedding_type == "relative_key_query":
223
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
224
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
225
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
226
+
227
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
228
+ if attention_mask is not None:
229
+ # Apply the attention mask (precomputed for all layers in GitModel forward() function)
230
+ attention_scores = attention_scores + attention_mask
231
+
232
+ # Normalize the attention scores to probabilities.
233
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
234
+
235
+ # This is actually dropping out entire tokens to attend to, which might
236
+ # seem a bit unusual, but is taken from the original Transformer paper.
237
+ attention_probs = self.dropout(attention_probs)
238
+
239
+ # Mask heads if we want to
240
+ if head_mask is not None:
241
+ attention_probs = attention_probs * head_mask
242
+
243
+ context_layer = torch.matmul(attention_probs, value_layer)
244
+
245
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
246
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
247
+ context_layer = context_layer.view(new_context_layer_shape)
248
+
249
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
250
+
251
+ outputs = outputs + (past_key_value,)
252
+ return outputs
253
+
254
+
255
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
256
+ class GitSelfOutput(nn.Module):
257
+ def __init__(self, config):
258
+ super().__init__()
259
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
260
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
261
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
262
+
263
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
264
+ hidden_states = self.dense(hidden_states)
265
+ hidden_states = self.dropout(hidden_states)
266
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
267
+ return hidden_states
268
+
269
+
270
+ class GitAttention(nn.Module):
271
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.__init__ with Bert->Git
272
+ def __init__(self, config, position_embedding_type=None):
273
+ super().__init__()
274
+ self.self = GitSelfAttention(config, position_embedding_type=position_embedding_type)
275
+ self.output = GitSelfOutput(config)
276
+ self.pruned_heads = set()
277
+
278
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
279
+ def prune_heads(self, heads):
280
+ if len(heads) == 0:
281
+ return
282
+ heads, index = find_pruneable_heads_and_indices(
283
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
284
+ )
285
+
286
+ # Prune linear layers
287
+ self.self.query = prune_linear_layer(self.self.query, index)
288
+ self.self.key = prune_linear_layer(self.self.key, index)
289
+ self.self.value = prune_linear_layer(self.self.value, index)
290
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
291
+
292
+ # Update hyper params and store pruned heads
293
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
294
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
295
+ self.pruned_heads = self.pruned_heads.union(heads)
296
+
297
+ def forward(
298
+ self,
299
+ hidden_states: torch.Tensor,
300
+ attention_mask: Optional[torch.FloatTensor] = None,
301
+ head_mask: Optional[torch.FloatTensor] = None,
302
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
303
+ output_attentions: Optional[bool] = False,
304
+ pixel_values_present: Optional[bool] = False,
305
+ ) -> Tuple[torch.Tensor]:
306
+ self_outputs = self.self(
307
+ hidden_states,
308
+ attention_mask,
309
+ head_mask,
310
+ past_key_value,
311
+ output_attentions,
312
+ pixel_values_present,
313
+ )
314
+ attention_output = self.output(self_outputs[0], hidden_states)
315
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
316
+ return outputs
317
+
318
+
319
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
320
+ class GitIntermediate(nn.Module):
321
+ def __init__(self, config):
322
+ super().__init__()
323
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
324
+ if isinstance(config.hidden_act, str):
325
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
326
+ else:
327
+ self.intermediate_act_fn = config.hidden_act
328
+
329
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
330
+ hidden_states = self.dense(hidden_states)
331
+ hidden_states = self.intermediate_act_fn(hidden_states)
332
+ return hidden_states
333
+
334
+
335
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
336
+ class GitOutput(nn.Module):
337
+ def __init__(self, config):
338
+ super().__init__()
339
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
340
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
341
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
342
+
343
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
344
+ hidden_states = self.dense(hidden_states)
345
+ hidden_states = self.dropout(hidden_states)
346
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
347
+ return hidden_states
348
+
349
+
350
+ class GitLayer(nn.Module):
351
+ def __init__(self, config):
352
+ super().__init__()
353
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
354
+ self.seq_len_dim = 1
355
+ self.attention = GitAttention(config)
356
+ self.intermediate = GitIntermediate(config)
357
+ self.output = GitOutput(config)
358
+
359
+ def forward(
360
+ self,
361
+ hidden_states: torch.Tensor,
362
+ attention_mask: Optional[torch.FloatTensor] = None,
363
+ head_mask: Optional[torch.FloatTensor] = None,
364
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
365
+ output_attentions: Optional[bool] = False,
366
+ pixel_values_present: Optional[bool] = False,
367
+ ) -> Tuple[torch.Tensor]:
368
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
369
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
370
+ self_attention_outputs = self.attention(
371
+ hidden_states,
372
+ attention_mask,
373
+ head_mask,
374
+ output_attentions=output_attentions,
375
+ past_key_value=self_attn_past_key_value,
376
+ pixel_values_present=pixel_values_present,
377
+ )
378
+ attention_output = self_attention_outputs[0]
379
+
380
+ # if decoder, the last output is tuple of self-attn cache
381
+ outputs = self_attention_outputs[1:-1]
382
+ present_key_value = self_attention_outputs[-1]
383
+
384
+ layer_output = apply_chunking_to_forward(
385
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
386
+ )
387
+ outputs = (layer_output,) + outputs
388
+
389
+ # if decoder, return the attn key/values as the last output
390
+ outputs = outputs + (present_key_value,)
391
+
392
+ return outputs
393
+
394
+ def feed_forward_chunk(self, attention_output):
395
+ intermediate_output = self.intermediate(attention_output)
396
+ layer_output = self.output(intermediate_output, attention_output)
397
+ return layer_output
398
+
399
+
400
+ class GitEncoder(nn.Module):
401
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Git
402
+ def __init__(self, config):
403
+ super().__init__()
404
+ self.config = config
405
+ self.layer = nn.ModuleList([GitLayer(config) for _ in range(config.num_hidden_layers)])
406
+ self.gradient_checkpointing = False
407
+
408
+ def forward(
409
+ self,
410
+ hidden_states: torch.Tensor,
411
+ attention_mask: Optional[torch.FloatTensor] = None,
412
+ head_mask: Optional[torch.FloatTensor] = None,
413
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
414
+ use_cache: Optional[bool] = None,
415
+ output_attentions: Optional[bool] = False,
416
+ output_hidden_states: Optional[bool] = False,
417
+ pixel_values_present: Optional[bool] = False,
418
+ return_dict: Optional[bool] = True,
419
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
420
+ if self.gradient_checkpointing and self.training:
421
+ if use_cache:
422
+ logger.warning_once(
423
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
424
+ )
425
+ use_cache = False
426
+
427
+ all_hidden_states = () if output_hidden_states else None
428
+ all_self_attentions = () if output_attentions else None
429
+
430
+ next_decoder_cache = () if use_cache else None
431
+ for i, layer_module in enumerate(self.layer):
432
+ if output_hidden_states:
433
+ all_hidden_states = all_hidden_states + (hidden_states,)
434
+
435
+ layer_head_mask = head_mask[i] if head_mask is not None else None
436
+ past_key_value = past_key_values[i] if past_key_values is not None else None
437
+
438
+ if self.gradient_checkpointing and self.training:
439
+ layer_outputs = self._gradient_checkpointing_func(
440
+ layer_module.__call__,
441
+ hidden_states,
442
+ attention_mask,
443
+ layer_head_mask,
444
+ past_key_value,
445
+ output_attentions,
446
+ )
447
+ else:
448
+ layer_outputs = layer_module(
449
+ hidden_states,
450
+ attention_mask,
451
+ layer_head_mask,
452
+ past_key_value,
453
+ output_attentions,
454
+ pixel_values_present,
455
+ )
456
+
457
+ hidden_states = layer_outputs[0]
458
+ if use_cache:
459
+ next_decoder_cache += (layer_outputs[-1],)
460
+ if output_attentions:
461
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
462
+
463
+ if output_hidden_states:
464
+ all_hidden_states = all_hidden_states + (hidden_states,)
465
+
466
+ if not return_dict:
467
+ return tuple(
468
+ v
469
+ for v in [
470
+ hidden_states,
471
+ next_decoder_cache,
472
+ all_hidden_states,
473
+ all_self_attentions,
474
+ ]
475
+ if v is not None
476
+ )
477
+ return BaseModelOutputWithPast(
478
+ last_hidden_state=hidden_states,
479
+ past_key_values=next_decoder_cache,
480
+ hidden_states=all_hidden_states,
481
+ attentions=all_self_attentions,
482
+ )
483
+
484
+
485
+ class GitPreTrainedModel(PreTrainedModel):
486
+ """
487
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
488
+ models.
489
+ """
490
+
491
+ config_class = GitConfig
492
+ base_model_prefix = "git"
493
+ supports_gradient_checkpointing = True
494
+
495
+ def _init_weights(self, module):
496
+ """Initialize the weights"""
497
+ if isinstance(module, GitVisionEmbeddings):
498
+ nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range)
499
+ nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range)
500
+ nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range)
501
+ if isinstance(module, nn.Linear):
502
+ # Slightly different from the TF version which uses truncated_normal for initialization
503
+ # cf https://github.com/pytorch/pytorch/pull/5617
504
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
505
+ if module.bias is not None:
506
+ module.bias.data.zero_()
507
+ elif isinstance(module, nn.Embedding):
508
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
509
+ if module.padding_idx is not None:
510
+ module.weight.data[module.padding_idx].zero_()
511
+ elif isinstance(module, nn.LayerNorm):
512
+ module.bias.data.zero_()
513
+ module.weight.data.fill_(1.0)
514
+
515
+
516
+ GIT_START_DOCSTRING = r"""
517
+
518
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
519
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
520
+ etc.)
521
+
522
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
523
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
524
+ and behavior.
525
+
526
+ Parameters:
527
+ config ([`GitConfig`]): Model configuration class with all the parameters of the model.
528
+ Initializing with a config file does not load the weights associated with the model, only the
529
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
530
+ """
531
+
532
+ GIT_INPUTS_DOCSTRING = r"""
533
+ Args:
534
+ input_ids (`torch.LongTensor` of shape `({0})`):
535
+ Indices of input sequence tokens in the vocabulary.
536
+
537
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
538
+ [`PreTrainedTokenizer.__call__`] for details.
539
+
540
+ [What are input IDs?](../glossary#input-ids)
541
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
542
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
543
+
544
+ - 1 for tokens that are **not masked**,
545
+ - 0 for tokens that are **masked**.
546
+
547
+ [What are attention masks?](../glossary#attention-mask)
548
+
549
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
550
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
551
+ config.max_position_embeddings - 1]`.
552
+
553
+ [What are position IDs?](../glossary#position-ids)
554
+
555
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
556
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
557
+ [`CLIPImageProcessor.__call__`] for details.
558
+
559
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
560
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
561
+
562
+ - 1 indicates the head is **not masked**,
563
+ - 0 indicates the head is **masked**.
564
+
565
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
566
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
567
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
568
+ model's internal embedding lookup matrix.
569
+ output_attentions (`bool`, *optional*):
570
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
571
+ tensors for more detail.
572
+ output_hidden_states (`bool`, *optional*):
573
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
574
+ more detail.
575
+ return_dict (`bool`, *optional*):
576
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
577
+ """
578
+
579
+
580
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Git
581
+ class GitVisionEmbeddings(nn.Module):
582
+ def __init__(self, config: GitVisionConfig):
583
+ super().__init__()
584
+ self.config = config
585
+ self.embed_dim = config.hidden_size
586
+ self.image_size = config.image_size
587
+ self.patch_size = config.patch_size
588
+
589
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
590
+
591
+ self.patch_embedding = nn.Conv2d(
592
+ in_channels=config.num_channels,
593
+ out_channels=self.embed_dim,
594
+ kernel_size=self.patch_size,
595
+ stride=self.patch_size,
596
+ bias=False,
597
+ )
598
+
599
+ self.num_patches = (self.image_size // self.patch_size) ** 2
600
+ self.num_positions = self.num_patches + 1
601
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
602
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
603
+
604
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
605
+ batch_size = pixel_values.shape[0]
606
+ target_dtype = self.patch_embedding.weight.dtype
607
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
608
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
609
+
610
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
611
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
612
+ embeddings = embeddings + self.position_embedding(self.position_ids)
613
+ return embeddings
614
+
615
+
616
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP
617
+ class GitVisionMLP(nn.Module):
618
+ def __init__(self, config):
619
+ super().__init__()
620
+ self.config = config
621
+ self.activation_fn = ACT2FN[config.hidden_act]
622
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
623
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
624
+
625
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
626
+ hidden_states = self.fc1(hidden_states)
627
+ hidden_states = self.activation_fn(hidden_states)
628
+ hidden_states = self.fc2(hidden_states)
629
+ return hidden_states
630
+
631
+
632
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention
633
+ class GitVisionAttention(nn.Module):
634
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
635
+
636
+ def __init__(self, config):
637
+ super().__init__()
638
+ self.config = config
639
+ self.embed_dim = config.hidden_size
640
+ self.num_heads = config.num_attention_heads
641
+ self.head_dim = self.embed_dim // self.num_heads
642
+ if self.head_dim * self.num_heads != self.embed_dim:
643
+ raise ValueError(
644
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
645
+ f" {self.num_heads})."
646
+ )
647
+ self.scale = self.head_dim**-0.5
648
+ self.dropout = config.attention_dropout
649
+
650
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
651
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
652
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
653
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
654
+
655
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
656
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
657
+
658
+ def forward(
659
+ self,
660
+ hidden_states: torch.Tensor,
661
+ attention_mask: Optional[torch.Tensor] = None,
662
+ causal_attention_mask: Optional[torch.Tensor] = None,
663
+ output_attentions: Optional[bool] = False,
664
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
665
+ """Input shape: Batch x Time x Channel"""
666
+
667
+ bsz, tgt_len, embed_dim = hidden_states.size()
668
+
669
+ # get query proj
670
+ query_states = self.q_proj(hidden_states) * self.scale
671
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
672
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
673
+
674
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
675
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
676
+ key_states = key_states.view(*proj_shape)
677
+ value_states = value_states.view(*proj_shape)
678
+
679
+ src_len = key_states.size(1)
680
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
681
+
682
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
683
+ raise ValueError(
684
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
685
+ f" {attn_weights.size()}"
686
+ )
687
+
688
+ # apply the causal_attention_mask first
689
+ if causal_attention_mask is not None:
690
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
691
+ raise ValueError(
692
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
693
+ f" {causal_attention_mask.size()}"
694
+ )
695
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
696
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
697
+
698
+ if attention_mask is not None:
699
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
700
+ raise ValueError(
701
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
702
+ )
703
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
704
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
705
+
706
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
707
+
708
+ if output_attentions:
709
+ # this operation is a bit awkward, but it's required to
710
+ # make sure that attn_weights keeps its gradient.
711
+ # In order to do so, attn_weights have to be reshaped
712
+ # twice and have to be reused in the following
713
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
714
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
715
+ else:
716
+ attn_weights_reshaped = None
717
+
718
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
719
+
720
+ attn_output = torch.bmm(attn_probs, value_states)
721
+
722
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
723
+ raise ValueError(
724
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
725
+ f" {attn_output.size()}"
726
+ )
727
+
728
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
729
+ attn_output = attn_output.transpose(1, 2)
730
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
731
+
732
+ attn_output = self.out_proj(attn_output)
733
+
734
+ return attn_output, attn_weights_reshaped
735
+
736
+
737
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GitVision
738
+ class GitVisionEncoderLayer(nn.Module):
739
+ def __init__(self, config: GitVisionConfig):
740
+ super().__init__()
741
+ self.embed_dim = config.hidden_size
742
+ self.self_attn = GitVisionAttention(config)
743
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
744
+ self.mlp = GitVisionMLP(config)
745
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
746
+
747
+ def forward(
748
+ self,
749
+ hidden_states: torch.Tensor,
750
+ attention_mask: torch.Tensor,
751
+ causal_attention_mask: torch.Tensor,
752
+ output_attentions: Optional[bool] = False,
753
+ ) -> Tuple[torch.FloatTensor]:
754
+ """
755
+ Args:
756
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
757
+ attention_mask (`torch.FloatTensor`): attention mask of size
758
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
760
+ output_attentions (`bool`, *optional*):
761
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
762
+ returned tensors for more detail.
763
+ """
764
+ residual = hidden_states
765
+
766
+ hidden_states = self.layer_norm1(hidden_states)
767
+ hidden_states, attn_weights = self.self_attn(
768
+ hidden_states=hidden_states,
769
+ attention_mask=attention_mask,
770
+ causal_attention_mask=causal_attention_mask,
771
+ output_attentions=output_attentions,
772
+ )
773
+ hidden_states = residual + hidden_states
774
+
775
+ residual = hidden_states
776
+ hidden_states = self.layer_norm2(hidden_states)
777
+ hidden_states = self.mlp(hidden_states)
778
+ hidden_states = residual + hidden_states
779
+
780
+ outputs = (hidden_states,)
781
+
782
+ if output_attentions:
783
+ outputs += (attn_weights,)
784
+
785
+ return outputs
786
+
787
+
788
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->GitVision, CLIPConfig
789
+ class GitVisionEncoder(nn.Module):
790
+ """
791
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
792
+ [`GitVisionEncoderLayer`].
793
+
794
+ Args:
795
+ config: GitVisionConfig
796
+ """
797
+
798
+ def __init__(self, config: GitVisionConfig):
799
+ super().__init__()
800
+ self.config = config
801
+ self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
802
+ self.gradient_checkpointing = False
803
+
804
+ def forward(
805
+ self,
806
+ inputs_embeds,
807
+ attention_mask: Optional[torch.Tensor] = None,
808
+ causal_attention_mask: Optional[torch.Tensor] = None,
809
+ output_attentions: Optional[bool] = None,
810
+ output_hidden_states: Optional[bool] = None,
811
+ return_dict: Optional[bool] = None,
812
+ ) -> Union[Tuple, BaseModelOutput]:
813
+ r"""
814
+ Args:
815
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
816
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
817
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
818
+ than the model's internal embedding lookup matrix.
819
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
820
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
821
+
822
+ - 1 for tokens that are **not masked**,
823
+ - 0 for tokens that are **masked**.
824
+
825
+ [What are attention masks?](../glossary#attention-mask)
826
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
827
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
828
+
829
+ - 1 for tokens that are **not masked**,
830
+ - 0 for tokens that are **masked**.
831
+
832
+ [What are attention masks?](../glossary#attention-mask)
833
+ output_attentions (`bool`, *optional*):
834
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
835
+ returned tensors for more detail.
836
+ output_hidden_states (`bool`, *optional*):
837
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
838
+ for more detail.
839
+ return_dict (`bool`, *optional*):
840
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
841
+ """
842
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
843
+ output_hidden_states = (
844
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
845
+ )
846
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
847
+
848
+ encoder_states = () if output_hidden_states else None
849
+ all_attentions = () if output_attentions else None
850
+
851
+ hidden_states = inputs_embeds
852
+ for idx, encoder_layer in enumerate(self.layers):
853
+ if output_hidden_states:
854
+ encoder_states = encoder_states + (hidden_states,)
855
+ if self.gradient_checkpointing and self.training:
856
+ layer_outputs = self._gradient_checkpointing_func(
857
+ encoder_layer.__call__,
858
+ hidden_states,
859
+ attention_mask,
860
+ causal_attention_mask,
861
+ output_attentions,
862
+ )
863
+ else:
864
+ layer_outputs = encoder_layer(
865
+ hidden_states,
866
+ attention_mask,
867
+ causal_attention_mask,
868
+ output_attentions=output_attentions,
869
+ )
870
+
871
+ hidden_states = layer_outputs[0]
872
+
873
+ if output_attentions:
874
+ all_attentions = all_attentions + (layer_outputs[1],)
875
+
876
+ if output_hidden_states:
877
+ encoder_states = encoder_states + (hidden_states,)
878
+
879
+ if not return_dict:
880
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
881
+ return BaseModelOutput(
882
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
883
+ )
884
+
885
+
886
+ GIT_VISION_INPUTS_DOCSTRING = r"""
887
+ Args:
888
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
889
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
890
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
891
+ output_attentions (`bool`, *optional*):
892
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
893
+ tensors for more detail.
894
+ output_hidden_states (`bool`, *optional*):
895
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
896
+ more detail.
897
+ return_dict (`bool`, *optional*):
898
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
899
+ """
900
+
901
+
902
+ class GitVisionTransformer(nn.Module):
903
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPEncoder->GitVisionEncoder, CLIP->Git
904
+ def __init__(self, config: GitVisionConfig):
905
+ super().__init__()
906
+ self.config = config
907
+ embed_dim = config.hidden_size
908
+
909
+ self.embeddings = GitVisionEmbeddings(config)
910
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
911
+ self.encoder = GitVisionEncoder(config)
912
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
913
+
914
+ @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
915
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
916
+ def forward(
917
+ self,
918
+ pixel_values: Optional[torch.FloatTensor] = None,
919
+ output_attentions: Optional[bool] = None,
920
+ output_hidden_states: Optional[bool] = None,
921
+ return_dict: Optional[bool] = None,
922
+ ) -> Union[Tuple, BaseModelOutput]:
923
+ r"""
924
+ Returns:
925
+
926
+ """
927
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
928
+ output_hidden_states = (
929
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
930
+ )
931
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
932
+
933
+ if pixel_values is None:
934
+ raise ValueError("You have to specify pixel_values")
935
+
936
+ hidden_states = self.embeddings(pixel_values)
937
+ hidden_states = self.pre_layrnorm(hidden_states)
938
+
939
+ encoder_outputs = self.encoder(
940
+ inputs_embeds=hidden_states,
941
+ output_attentions=output_attentions,
942
+ output_hidden_states=output_hidden_states,
943
+ return_dict=return_dict,
944
+ )
945
+
946
+ last_hidden_state = encoder_outputs[0]
947
+
948
+ last_hidden_state = self.post_layernorm(last_hidden_state)
949
+
950
+ if not return_dict:
951
+ return (last_hidden_state,) + encoder_outputs[1:]
952
+
953
+ return BaseModelOutput(
954
+ last_hidden_state=last_hidden_state,
955
+ hidden_states=encoder_outputs.hidden_states,
956
+ attentions=encoder_outputs.attentions,
957
+ )
958
+
959
+
960
+ @add_start_docstrings(
961
+ """The vision model from CLIP, used in GIT, without any head or projection on top.""",
962
+ GIT_START_DOCSTRING,
963
+ )
964
+ class GitVisionModel(GitPreTrainedModel):
965
+ config_class = GitVisionConfig
966
+ main_input_name = "pixel_values"
967
+
968
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git
969
+ def __init__(self, config: GitVisionConfig):
970
+ super().__init__(config)
971
+ self.vision_model = GitVisionTransformer(config)
972
+ # Initialize weights and apply final processing
973
+ self.post_init()
974
+
975
+ def get_input_embeddings(self) -> nn.Module:
976
+ return self.vision_model.embeddings.patch_embedding
977
+
978
+ @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
979
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
980
+ def forward(
981
+ self,
982
+ pixel_values: Optional[torch.FloatTensor] = None,
983
+ output_attentions: Optional[bool] = None,
984
+ output_hidden_states: Optional[bool] = None,
985
+ return_dict: Optional[bool] = None,
986
+ ) -> Union[Tuple, BaseModelOutput]:
987
+ r"""
988
+ Returns:
989
+
990
+ Examples:
991
+
992
+ ```python
993
+ >>> from PIL import Image
994
+ >>> import requests
995
+ >>> from transformers import AutoProcessor, GitVisionModel
996
+
997
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
998
+ >>> model = GitVisionModel.from_pretrained("microsoft/git-base")
999
+
1000
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1001
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1002
+
1003
+ >>> inputs = processor(images=image, return_tensors="pt")
1004
+
1005
+ >>> outputs = model(**inputs)
1006
+ >>> last_hidden_state = outputs.last_hidden_state
1007
+ ```"""
1008
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1009
+
1010
+ return self.vision_model(
1011
+ pixel_values=pixel_values,
1012
+ output_attentions=output_attentions,
1013
+ output_hidden_states=output_hidden_states,
1014
+ return_dict=return_dict,
1015
+ )
1016
+
1017
+
1018
+ class GitProjection(nn.Module):
1019
+ def __init__(self, config: GitConfig):
1020
+ super().__init__()
1021
+ self.config = config
1022
+ self.visual_projection = nn.Sequential(
1023
+ nn.Linear(config.vision_config.hidden_size, config.hidden_size),
1024
+ nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
1025
+ )
1026
+
1027
+ def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
1028
+ return self.visual_projection(embeddings)
1029
+
1030
+
1031
+ @add_start_docstrings(
1032
+ "The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states"
1033
+ " without any specific head on top.",
1034
+ GIT_START_DOCSTRING,
1035
+ )
1036
+ class GitModel(GitPreTrainedModel):
1037
+ def __init__(self, config):
1038
+ super().__init__(config)
1039
+ self.config = config
1040
+
1041
+ self.embeddings = GitEmbeddings(config)
1042
+ self.image_encoder = GitVisionModel(config.vision_config)
1043
+ self.encoder = GitEncoder(config)
1044
+
1045
+ self.visual_projection = GitProjection(config)
1046
+
1047
+ if config.num_image_with_embedding is not None:
1048
+ self.img_temperal_embedding = nn.ParameterList(
1049
+ nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
1050
+ for _ in range(config.num_image_with_embedding)
1051
+ )
1052
+
1053
+ # Initialize weights and apply final processing
1054
+ self.post_init()
1055
+
1056
+ def get_input_embeddings(self):
1057
+ return self.embeddings.word_embeddings
1058
+
1059
+ def set_input_embeddings(self, value):
1060
+ self.embeddings.word_embeddings = value
1061
+
1062
+ def _prune_heads(self, heads_to_prune):
1063
+ """
1064
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1065
+ class PreTrainedModel
1066
+ """
1067
+ for layer, heads in heads_to_prune.items():
1068
+ self.encoder.layer[layer].attention.prune_heads(heads)
1069
+
1070
+ def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
1071
+ # Default mask is for forward direction. Flip for backward direction.
1072
+ mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
1073
+ mask = mask.masked_fill(mask == 1, float("-inf"))
1074
+ return mask
1075
+
1076
+ def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
1077
+ num_tgt = tgt.shape[1]
1078
+ num_memory = memory.shape[1]
1079
+ device = tgt.device
1080
+ dtype = tgt.dtype
1081
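+ # The combined mask is assembled block-wise: image (memory) tokens may attend to each other
+ # (top_left, zeros) but not to text tokens (top_right, -inf), while text tokens may attend to
+ # all image tokens (bottom_left, zeros) and to earlier text tokens via the causal `tgt_mask`.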
+ top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
1082
+ top_right = torch.full(
1083
+ (num_memory, num_tgt + past_key_values_length),
1084
+ float("-inf"),
1085
+ device=tgt.device,
1086
+ dtype=dtype,
1087
+ )
1088
+ bottom_left = torch.zeros(
1089
+ (num_tgt, num_memory),
1090
+ dtype=dtype,
1091
+ device=tgt_mask.device,
1092
+ )
1093
+
1094
+ if past_key_values_length > 0:
1095
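+ # With a populated cache only the newly generated token(s) are queried, so no causal
+ # masking is needed within the text block: they may attend to every cached position.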
+ tgt_mask = torch.zeros(
1096
+ (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
1097
+ dtype=dtype,
1098
+ device=tgt_mask.device,
1099
+ )
1100
+
1101
+ left = torch.cat((top_left, bottom_left), dim=0)
1102
+ right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
1103
+
1104
+ full_attention_mask = torch.cat((left, right), dim=1)[None, :]
1105
+
1106
+ if memory_key_padding_mask is None:
1107
+ memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
1108
+ # a False value means the position is valid, i.e. it is not padding
1109
+ if memory_key_padding_mask.dtype != torch.bool:
1110
+ raise ValueError("Memory key padding mask must be a boolean tensor.")
1111
+ zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
1112
+ zero_negative_infinity[memory_key_padding_mask] = float("-inf")
1113
+ full_attention_mask = full_attention_mask.expand(
1114
+ (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
1115
+ )
1116
+ full_attention_mask = full_attention_mask.clone()
1117
+ origin_left = full_attention_mask[:, :, :num_memory]
1118
+ update = zero_negative_infinity[:, None, :]
1119
+ full_attention_mask[:, :, :num_memory] = origin_left + update
1120
+
1121
+ # add axis for multi-head
1122
+ full_attention_mask = full_attention_mask[:, None, :, :]
1123
+
1124
+ return full_attention_mask
1125
+
1126
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1127
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
1128
+ def forward(
1129
+ self,
1130
+ input_ids: Optional[torch.Tensor] = None,
1131
+ attention_mask: Optional[torch.Tensor] = None,
1132
+ position_ids: Optional[torch.Tensor] = None,
1133
+ pixel_values: Optional[torch.Tensor] = None,
1134
+ head_mask: Optional[torch.Tensor] = None,
1135
+ inputs_embeds: Optional[torch.Tensor] = None,
1136
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1137
+ use_cache: Optional[bool] = None,
1138
+ output_attentions: Optional[bool] = None,
1139
+ output_hidden_states: Optional[bool] = None,
1140
+ return_dict: Optional[bool] = None,
1141
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
1142
+ r"""
1143
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1144
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1145
+
1146
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1147
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1148
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1149
+ use_cache (`bool`, *optional*):
1150
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1151
+ `past_key_values`).
1152
+
1153
+ Returns:
1154
+
1155
+ Examples:
1156
+
1157
+ ```python
1158
+ >>> from transformers import AutoProcessor, AutoModel
1159
+ >>> import requests
1160
+ >>> from PIL import Image
1161
+
1162
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
1163
+ >>> model = AutoModel.from_pretrained("microsoft/git-base")
1164
+
1165
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1166
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1167
+
1168
+ >>> text = "this is an image of two cats"
1169
+
1170
+ >>> inputs = processor(text, images=image, return_tensors="pt")
1171
+
1172
+ >>> outputs = model(**inputs)
1173
+ >>> last_hidden_state = outputs.last_hidden_state
1174
+ ```"""
1175
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1176
+ output_hidden_states = (
1177
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1178
+ )
1179
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1180
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1181
+
1182
+ if input_ids is not None and inputs_embeds is not None:
1183
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1184
+ elif input_ids is not None:
1185
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1186
+ input_shape = input_ids.size()
1187
+ elif inputs_embeds is not None:
1188
+ input_shape = inputs_embeds.size()[:-1]
1189
+ else:
1190
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1191
+
1192
+ seq_length = input_shape[1]
1193
+
1194
+ # past_key_values_length
1195
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1196
+
1197
+ # Prepare head mask if needed
1198
+ # 1.0 in head_mask indicate we keep the head
1199
+ # attention_probs has shape bsz x n_heads x N x N
1200
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1201
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1202
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1203
+
1204
+ projected_visual_features = None
1205
+ if pixel_values is not None:
1206
+ if pixel_values.ndim == 4:
1207
+ # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
1208
+ visual_features = self.image_encoder(pixel_values).last_hidden_state
1209
+
1210
+ elif pixel_values.ndim == 5:
1211
+ # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
1212
+ visual_features = []
1213
+ for frame_idx in range(pixel_values.shape[1]):
1214
+ visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :]).last_hidden_state
1215
+ visual_features_frame += self.img_temperal_embedding[frame_idx]
1216
+ visual_features.append(visual_features_frame)
1217
+
1218
+ # finally, concatenate all features along sequence dimension
1219
+ visual_features = torch.cat(visual_features, dim=1)
1220
+
1221
+ else:
1222
+ raise ValueError("pixel_values must be of rank 4 or 5")
1223
+
1224
+ projected_visual_features = self.visual_projection(visual_features)
1225
+
1226
+ embedding_output = self.embeddings(
1227
+ input_ids=input_ids,
1228
+ position_ids=position_ids,
1229
+ inputs_embeds=inputs_embeds,
1230
+ past_key_values_length=past_key_values_length,
1231
+ )
1232
+
1233
+ if projected_visual_features is None:
1234
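+ # Text-only input: use an empty (zero-length) visual prefix so the concatenation below is a no-op.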
+ projected_visual_features = torch.zeros(
1235
+ (embedding_output.shape[0], 0, embedding_output.shape[2]),
1236
+ dtype=embedding_output.dtype,
1237
+ device=embedding_output.device,
1238
+ )
1239
+
1240
+ # Repeat visual features to match embedding batch size.
1241
+ projected_visual_features = projected_visual_features.repeat(
1242
+ embedding_output.size(0) // projected_visual_features.size(0), 1, 1
1243
+ )
1244
+
1245
+ # concatenate patch token and text token embeddings
1246
+ hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
1247
+
1248
+ # By default, an additive causal mask is created
1249
+ # for masking the future (one direction).
1250
+ tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)
1251
+
1252
+ # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
1253
+ combined_attention_mask = self.create_attention_mask(
1254
+ tgt=embedding_output,
1255
+ memory=projected_visual_features,
1256
+ tgt_mask=tgt_mask,
1257
+ past_key_values_length=past_key_values_length,
1258
+ )
1259
+
1260
+ if attention_mask is not None:
1261
+ # if the user provides an attention mask, we add it to the default one
1262
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1263
+ expanded_attn_mask = _prepare_4d_attention_mask(
1264
+ attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]
1265
+ ).to(embedding_output.device)
1266
+ if past_key_values_length > 0:
1267
+ expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
1268
+ else:
1269
+ combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask
1270
+
1271
+ encoder_outputs = self.encoder(
1272
+ hidden_states,
1273
+ attention_mask=combined_attention_mask,
1274
+ head_mask=head_mask,
1275
+ past_key_values=past_key_values,
1276
+ use_cache=use_cache,
1277
+ output_attentions=output_attentions,
1278
+ output_hidden_states=output_hidden_states,
1279
+ return_dict=return_dict,
1280
+ pixel_values_present=pixel_values is not None,
1281
+ )
1282
+ sequence_output = encoder_outputs[0]
1283
+
1284
+ if not return_dict:
1285
+ return (sequence_output,) + encoder_outputs[1:]
1286
+
1287
+ return BaseModelOutputWithPast(
1288
+ last_hidden_state=sequence_output,
1289
+ past_key_values=encoder_outputs.past_key_values,
1290
+ hidden_states=encoder_outputs.hidden_states,
1291
+ attentions=encoder_outputs.attentions,
1292
+ )
1293
+
1294
+
1295
+ @add_start_docstrings(
1296
+ """GIT Model with a `language modeling` head on top for autoregressive language modeling.""", GIT_START_DOCSTRING
1297
+ )
1298
+ class GitForCausalLM(GitPreTrainedModel):
1299
+ _tied_weights_keys = ["output.weight"]
1300
+
1301
+ def __init__(self, config):
1302
+ super().__init__(config)
1303
+
1304
+ self.git = GitModel(config)
1305
+ self.output = nn.Linear(config.hidden_size, config.vocab_size)
1306
+
1307
+ # Initialize weights and apply final processing
1308
+ self.post_init()
1309
+
1310
+ def get_output_embeddings(self):
1311
+ return self.output
1312
+
1313
+ def set_output_embeddings(self, new_embeddings):
1314
+ self.output = new_embeddings
1315
+
1316
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1317
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1318
+ def forward(
1319
+ self,
1320
+ input_ids: Optional[torch.Tensor] = None,
1321
+ attention_mask: Optional[torch.Tensor] = None,
1322
+ position_ids: Optional[torch.Tensor] = None,
1323
+ pixel_values: Optional[torch.Tensor] = None,
1324
+ head_mask: Optional[torch.Tensor] = None,
1325
+ inputs_embeds: Optional[torch.Tensor] = None,
1326
+ labels: Optional[torch.Tensor] = None,
1327
+ past_key_values: Optional[List[torch.Tensor]] = None,
1328
+ use_cache: Optional[bool] = None,
1329
+ output_attentions: Optional[bool] = None,
1330
+ output_hidden_states: Optional[bool] = None,
1331
+ return_dict: Optional[bool] = None,
1332
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
1333
+ r"""
1334
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1335
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1336
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1337
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1338
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1339
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1340
+
1341
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1342
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1343
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1344
+ use_cache (`bool`, *optional*):
1345
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1346
+ `past_key_values`).
1347
+
1348
+ Returns:
1349
+
1350
+ Examples:
1351
+
1352
+ Image captioning example:
1353
+
1354
+ ```python
1355
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1356
+ >>> import requests
1357
+ >>> from PIL import Image
1358
+
1359
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
1360
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
1361
+
1362
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1363
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1364
+
1365
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
1366
+
1367
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
1368
+ >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1369
+ >>> print(generated_caption)
1370
+ two cats sleeping on a pink blanket next to remotes.
1371
+ ```
1372
+
1373
+ Visual question answering (VQA) example:
1374
+
1375
+ ```python
1376
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1377
+ >>> from huggingface_hub import hf_hub_download
1378
+ >>> from PIL import Image
+ >>> import torch
1379
+
1380
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
1381
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")
1382
+
1383
+ >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
1384
+ >>> image = Image.open(file_path).convert("RGB")
1385
+
1386
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
1387
+
1388
+ >>> question = "what does the front of the bus say at the top?"
1389
+
1390
+ >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
1391
+ >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
1392
+ >>> input_ids = torch.tensor(input_ids).unsqueeze(0)
1393
+
1394
+ >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
1395
+ >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
1396
+ ['what does the front of the bus say at the top? special']
1397
+ ```
1398
+
1399
+ Video captioning example:
1400
+
1401
+ ```python
1402
+ >>> import av
1403
+ >>> import numpy as np
1404
+ >>> from PIL import Image
1405
+ >>> from huggingface_hub import hf_hub_download
1406
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
1407
+
1408
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex")
1409
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex")
1410
+
1411
+ >>> # set seed for reproducibility
1412
+ >>> np.random.seed(45)
1413
+
1414
+
1415
+ >>> def read_video_pyav(container, indices):
1416
+ ... '''
1417
+ ... Decode the video with PyAV decoder.
1418
+ ... Args:
1419
+ ... container (`av.container.input.InputContainer`): PyAV container.
1420
+ ... indices (`List[int]`): List of frame indices to decode.
1421
+ ... Returns:
1422
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
1423
+ ... '''
1424
+ ... frames = []
1425
+ ... container.seek(0)
1426
+ ... start_index = indices[0]
1427
+ ... end_index = indices[-1]
1428
+ ... for i, frame in enumerate(container.decode(video=0)):
1429
+ ... if i > end_index:
1430
+ ... break
1431
+ ... if i >= start_index and i in indices:
1432
+ ... frames.append(frame)
1433
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
1434
+
1435
+
1436
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
1437
+ ... '''
1438
+ ... Sample a given number of frame indices from the video.
1439
+ ... Args:
1440
+ ... clip_len (`int`): Total number of frames to sample.
1441
+ ... frame_sample_rate (`int`): Sample every n-th frame.
1442
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
1443
+ ... Returns:
1444
+ ... indices (`List[int]`): List of sampled frame indices
1445
+ ... '''
1446
+ ... converted_len = int(clip_len * frame_sample_rate)
1447
+ ... end_idx = np.random.randint(converted_len, seg_len)
1448
+ ... start_idx = end_idx - converted_len
1449
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
1450
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
1451
+ ... return indices
1452
+
1453
+
1454
+ >>> # load video
1455
+ >>> file_path = hf_hub_download(
1456
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
1457
+ ... )
1458
+ >>> container = av.open(file_path)
1459
+
1460
+ >>> # sample frames
1461
+ >>> num_frames = model.config.num_image_with_embedding
1462
+ >>> indices = sample_frame_indices(
1463
+ ... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
1464
+ ... )
1465
+ >>> frames = read_video_pyav(container, indices)
1466
+
1467
+ >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values
1468
+
1469
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
1470
+
1471
+ >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
1472
+ Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
1473
+ ```
1474
+ """
1475
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1476
+ if labels is not None:
1477
+ use_cache = False
1478
+
1479
+ outputs = self.git(
1480
+ input_ids,
1481
+ attention_mask=attention_mask,
1482
+ position_ids=position_ids,
1483
+ pixel_values=pixel_values,
1484
+ head_mask=head_mask,
1485
+ inputs_embeds=inputs_embeds,
1486
+ past_key_values=past_key_values,
1487
+ use_cache=use_cache,
1488
+ output_attentions=output_attentions,
1489
+ output_hidden_states=output_hidden_states,
1490
+ return_dict=return_dict,
1491
+ )
1492
+
1493
+ sequence_output = outputs[0]
1494
+ logits = self.output(sequence_output)
1495
+
1496
+ loss = None
1497
+ if labels is not None:
1498
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1499
+ num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
1500
+ shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
1501
+ labels = labels[:, 1:].contiguous()
1502
+ loss_fct = CrossEntropyLoss()
1503
+ loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1))
1504
+
1505
+ if not return_dict:
1506
+ output = (logits,) + outputs[1:]
1507
+ return ((loss,) + output) if loss is not None else output
1508
+
1509
+ return CausalLMOutputWithPast(
1510
+ loss=loss,
1511
+ logits=logits,
1512
+ past_key_values=outputs.past_key_values,
1513
+ hidden_states=outputs.hidden_states,
1514
+ attentions=outputs.attentions,
1515
+ )
1516
+
1517
+ def prepare_inputs_for_generation(
1518
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1519
+ ):
1520
+ # cut decoder_input_ids if past_key_values is used
1521
+ if past_key_values is not None:
1522
+ input_ids = input_ids[:, -1:]
1523
+
1524
+ # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
1525
+ input_shape = input_ids.shape
1526
+ if attention_mask is None:
1527
+ attention_mask = input_ids.new_ones(input_shape)
1528
+
1529
+ return {
1530
+ "input_ids": input_ids,
1531
+ "attention_mask": attention_mask,
1532
+ "pixel_values": kwargs.get("pixel_values", None),
1533
+ "past_key_values": past_key_values,
1534
+ "use_cache": use_cache,
1535
+ }
1536
+
1537
+ def _reorder_cache(self, past_key_values, beam_idx):
1538
+ reordered_past = ()
1539
+ for layer_past in past_key_values:
1540
+ reordered_past += (
1541
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1542
+ )
1543
+ return reordered_past
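
For reference, the loss computed in the causal-LM forward pass above is an ordinary next-token cross-entropy, except that the positions occupied by image-patch embeddings are dropped before the shift. The snippet below is a minimal, self-contained sketch of that slicing on random tensors; the sizes are made up, and `num_image_tokens` merely stands in for the model's `image_patch_tokens` attribute.

```python
import torch
from torch.nn import CrossEntropyLoss

# Toy sizes, chosen only for illustration.
batch_size, num_image_tokens, text_len, vocab_size = 2, 4, 6, 11

# Logits cover [image patches | text tokens]; labels cover only the text tokens.
logits = torch.randn(batch_size, num_image_tokens + text_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, text_len))

# Drop the image positions and the final text position so that position t
# predicts text token t + 1 (the usual causal LM shift).
shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()

loss = CrossEntropyLoss()(shifted_logits.view(-1, vocab_size), shifted_labels.view(-1))
print(loss.item())
```

The surrounding generation helpers follow the usual decoder pattern: `prepare_inputs_for_generation` keeps only the last token once a cache exists, and `_reorder_cache` re-indexes every cached key/value tensor along the batch dimension so the cache follows the selected beams during beam search.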
venv/lib/python3.10/site-packages/transformers/models/git/processing_git.py ADDED
@@ -0,0 +1,113 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for GIT
17
+ """
18
+
19
+ from ...processing_utils import ProcessorMixin
20
+ from ...tokenization_utils_base import BatchEncoding
21
+
22
+
23
+ class GitProcessor(ProcessorMixin):
24
+ r"""
25
+ Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.
26
+
27
+ [`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the
28
+ [`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.
29
+
30
+ Args:
31
+ image_processor ([`AutoImageProcessor`]):
32
+ The image processor is a required input.
33
+ tokenizer ([`AutoTokenizer`]):
34
+ The tokenizer is a required input.
35
+ """
36
+
37
+ attributes = ["image_processor", "tokenizer"]
38
+ image_processor_class = "AutoImageProcessor"
39
+ tokenizer_class = "AutoTokenizer"
40
+
41
+ def __init__(self, image_processor, tokenizer):
42
+ super().__init__(image_processor, tokenizer)
43
+ self.current_processor = self.image_processor
44
+
45
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
46
+ """
47
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
48
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
49
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
50
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
51
+ of the above two methods for more information.
52
+
53
+ Args:
54
+ text (`str`, `List[str]`, `List[List[str]]`):
55
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
56
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
57
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
58
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
59
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
60
+ tensor. Both channels-first and channels-last formats are supported.
61
+
62
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
63
+ If set, will return tensors of a particular framework. Acceptable values are:
64
+
65
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
66
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
67
+ - `'np'`: Return NumPy `np.ndarray` objects.
68
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
69
+
70
+ Returns:
71
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
72
+
73
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
74
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
75
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
76
+ `None`).
77
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
78
+ """
79
+
80
+ if text is None and images is None:
81
+ raise ValueError("You have to specify either text or images. Both cannot be None.")
82
+
83
+ if text is not None:
84
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
85
+
86
+ if images is not None:
87
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
88
+
89
+ if text is not None and images is not None:
90
+ encoding["pixel_values"] = image_features.pixel_values
91
+ return encoding
92
+ elif text is not None:
93
+ return encoding
94
+ else:
95
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
96
+
97
+ def batch_decode(self, *args, **kwargs):
98
+ """
99
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
100
+ refer to the docstring of this method for more information.
101
+ """
102
+ return self.tokenizer.batch_decode(*args, **kwargs)
103
+
104
+ def decode(self, *args, **kwargs):
105
+ """
106
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
107
+ the docstring of this method for more information.
108
+ """
109
+ return self.tokenizer.decode(*args, **kwargs)
110
+
111
+ @property
112
+ def model_input_names(self):
113
+ return ["input_ids", "attention_mask", "pixel_values"]
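
As a quick illustration of how `GitProcessor.__call__` merges the two modalities, the sketch below feeds a dummy image and a short prompt through the processor and inspects the returned keys. It assumes network access to the Hub and that the `microsoft/git-base` checkpoint ships a processor; any GIT checkpoint should behave the same way.

```python
import numpy as np
from PIL import Image
from transformers import AutoProcessor

# First use downloads the processor files from the Hub.
processor = AutoProcessor.from_pretrained("microsoft/git-base")

# A dummy black RGB image keeps the example self-contained.
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))

inputs = processor(text="a photo of", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # at least: attention_mask, input_ids, pixel_values
```

When only `images` is passed, the same call returns a `BatchEncoding` containing just the image features; when only `text` is passed, it returns the tokenizer output unchanged.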
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_megatron_bert"] = [
30
+ "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "MegatronBertForCausalLM",
32
+ "MegatronBertForMaskedLM",
33
+ "MegatronBertForMultipleChoice",
34
+ "MegatronBertForNextSentencePrediction",
35
+ "MegatronBertForPreTraining",
36
+ "MegatronBertForQuestionAnswering",
37
+ "MegatronBertForSequenceClassification",
38
+ "MegatronBertForTokenClassification",
39
+ "MegatronBertModel",
40
+ "MegatronBertPreTrainedModel",
41
+ ]
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_megatron_bert import (
53
+ MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ MegatronBertForCausalLM,
55
+ MegatronBertForMaskedLM,
56
+ MegatronBertForMultipleChoice,
57
+ MegatronBertForNextSentencePrediction,
58
+ MegatronBertForPreTraining,
59
+ MegatronBertForQuestionAnswering,
60
+ MegatronBertForSequenceClassification,
61
+ MegatronBertForTokenClassification,
62
+ MegatronBertModel,
63
+ MegatronBertPreTrainedModel,
64
+ )
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
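
The `__init__.py` above uses the lazy-import pattern: `_import_structure` only names what each submodule exports, and the heavy PyTorch modeling code is loaded the first time one of those names is actually accessed. The class below is an illustrative stand-in for that idea, not the real `_LazyModule` from `transformers.utils`; it uses only the standard library, and the `demo`/`json` names are arbitrary.

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Toy analogue of the lazy-import idea (not transformers' _LazyModule)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # Import the owning submodule only on first access.
        submodule = importlib.import_module(self._attr_to_module[attr])
        return getattr(submodule, attr)


# Usage sketch: `json` is imported only when JSONDecoder is first requested.
lazy = LazyModule("demo", {"json": ["JSONDecoder"]})
print(lazy.JSONDecoder)
```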
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.26 kB).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/configuration_megatron_bert.cpython-310.pyc ADDED
Binary file (5.88 kB).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/convert_megatron_bert_checkpoint.cpython-310.pyc ADDED
Binary file (5.84 kB).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/modeling_megatron_bert.cpython-310.pyc ADDED
Binary file (54.5 kB).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/configuration_megatron_bert.py ADDED
@@ -0,0 +1,129 @@
1
+ # coding=utf-8
2
+ # Copyright 2021- NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MEGATRON_BERT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class MegatronBertConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`MegatronBertModel`]. It is used to instantiate a
30
+ MEGATRON_BERT model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the MEGATRON_BERT
32
+ [nvidia/megatron-bert-uncased-345m](https://huggingface.co/nvidia/megatron-bert-uncased-345m) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 29056):
40
+ Vocabulary size of the MEGATRON_BERT model. Defines the number of different tokens that can be represented
41
+ by the `input_ids` passed when calling [`MegatronBertModel`].
42
+ hidden_size (`int`, *optional*, defaults to 1024):
43
+ Dimensionality of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 24):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 16):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 4096):
49
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
53
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
54
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
55
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout ratio for the attention probabilities.
57
+ max_position_embeddings (`int`, *optional*, defaults to 512):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ type_vocab_size (`int`, *optional*, defaults to 2):
61
+ The vocabulary size of the `token_type_ids` passed when calling [`MegatronBertModel`].
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
65
+ The epsilon used by the layer normalization layers.
66
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
67
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
68
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
69
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
70
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
71
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
72
+ is_decoder (`bool`, *optional*, defaults to `False`):
73
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
74
+ use_cache (`bool`, *optional*, defaults to `True`):
75
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
76
+ relevant if `config.is_decoder=True`.
77
+
78
+ Examples:
79
+
80
+ ```python
81
+ >>> from transformers import MegatronBertConfig, MegatronBertModel
82
+
83
+ >>> # Initializing a MEGATRON_BERT google-bert/bert-base-uncased style configuration
84
+ >>> configuration = MegatronBertConfig()
85
+
86
+ >>> # Initializing a model (with random weights) from the google-bert/bert-base-uncased style configuration
87
+ >>> model = MegatronBertModel(configuration)
88
+
89
+ >>> # Accessing the model configuration
90
+ >>> configuration = model.config
91
+ ```"""
92
+
93
+ model_type = "megatron-bert"
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_size=29056,
98
+ hidden_size=1024,
99
+ num_hidden_layers=24,
100
+ num_attention_heads=16,
101
+ intermediate_size=4096,
102
+ hidden_act="gelu",
103
+ hidden_dropout_prob=0.1,
104
+ attention_probs_dropout_prob=0.1,
105
+ max_position_embeddings=512,
106
+ type_vocab_size=2,
107
+ initializer_range=0.02,
108
+ layer_norm_eps=1e-12,
109
+ pad_token_id=0,
110
+ position_embedding_type="absolute",
111
+ use_cache=True,
112
+ **kwargs,
113
+ ):
114
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
115
+
116
+ self.vocab_size = vocab_size
117
+ self.hidden_size = hidden_size
118
+ self.num_hidden_layers = num_hidden_layers
119
+ self.num_attention_heads = num_attention_heads
120
+ self.hidden_act = hidden_act
121
+ self.intermediate_size = intermediate_size
122
+ self.hidden_dropout_prob = hidden_dropout_prob
123
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
124
+ self.max_position_embeddings = max_position_embeddings
125
+ self.type_vocab_size = type_vocab_size
126
+ self.initializer_range = initializer_range
127
+ self.layer_norm_eps = layer_norm_eps
128
+ self.position_embedding_type = position_embedding_type
129
+ self.use_cache = use_cache
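
Since the defaults above reproduce the 345M-parameter architecture (24 layers, hidden size 1024, 16 heads), it is often handy to shrink them for smoke tests. The following is a small sketch of that; the tiny sizes are illustrative only and do not correspond to any published checkpoint. It requires PyTorch, because `MegatronBertModel` lives in the torch-only part of the library.

```python
from transformers import MegatronBertConfig, MegatronBertModel

# Deliberately tiny, illustrative sizes for a quick smoke test.
tiny_config = MegatronBertConfig(
    vocab_size=1024,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=128,
    max_position_embeddings=128,
)

model = MegatronBertModel(tiny_config)  # randomly initialised weights
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```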
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ADDED
@@ -0,0 +1,334 @@
1
+ ####################################################################################################
2
+
3
+ # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ ####################################################################################################
18
+
19
+ #
20
+ # Note: If when running this conversion script you're getting an exception:
21
+ # ModuleNotFoundError: No module named 'megatron.model.enums'
22
+ # you need to tell python where to find the clone of Megatron-LM, e.g.:
23
+ #
24
+ # cd /tmp
25
+ # git clone https://github.com/NVIDIA/Megatron-LM
26
+ # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ...
27
+ #
28
+ # if you already have it cloned elsewhere, simply adjust the path to the existing path
29
+ #
30
+ # If the training was done using a Megatron-LM fork, e.g.,
31
+ # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
32
+ # in your path, i.e., /path/to/Megatron-DeepSpeed/
33
+ #
34
+
35
+ import argparse
36
+ import os
37
+ import re
38
+ import zipfile
39
+
40
+ import torch
41
+
42
+ from transformers import MegatronBertConfig
43
+
44
+
45
+ ####################################################################################################
46
+
47
+
48
+ def recursive_print(name, val, spaces=0):
49
+ # Format the message.
50
+ if name is None:
51
+ msg = None
52
+ else:
53
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
54
+ msg = fmt.format(name)
55
+
56
+ # Print and recurse (if needed).
57
+ if isinstance(val, dict):
58
+ if msg is not None:
59
+ print(msg)
60
+ for k in val.keys():
61
+ recursive_print(k, val[k], spaces + 2)
62
+ elif isinstance(val, torch.Tensor):
63
+ print(msg, ":", val.size())
64
+ else:
65
+ print(msg, ":", val)
66
+
67
+
68
+ def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
69
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
70
+ # for compatibility with later versions of NVIDIA Megatron-LM.
71
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
72
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
73
+ # If param is the weight tensor of the self-attention block, the returned tensor
74
+ # will have to be transposed one more time to be read by HuggingFace BERT.
75
+ input_shape = param.size()
76
+ if checkpoint_version == 1.0:
77
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
78
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
79
+ param = param.view(*saved_shape)
80
+ param = param.transpose(0, 2)
81
+ param = param.transpose(1, 2).contiguous()
82
+ elif checkpoint_version >= 2.0:
83
+ # other versions store [num_heads * num_splits * hidden_size, :]
84
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
85
+ param = param.view(*saved_shape)
86
+ param = param.transpose(0, 1).contiguous()
87
+ param = param.view(*input_shape)
88
+ return param
89
+
90
+
91
+ ####################################################################################################
92
+
93
+
94
+ def convert_megatron_checkpoint(args, input_state_dict, config):
95
+ # The converted output model.
96
+ output_state_dict = {}
97
+
98
+ # old versions did not store training args
99
+ ds_args = input_state_dict.get("args", None)
100
+ if ds_args is not None:
101
+ # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
102
+ # from pprint import pprint
103
+ # pprint(vars(ds_args))
104
+
105
+ config.tokenizer_type = ds_args.tokenizer_type
106
+ config.vocab_size = ds_args.padded_vocab_size
107
+ config.max_position_embeddings = ds_args.max_position_embeddings
108
+ config.hidden_size = ds_args.hidden_size
109
+ config.num_hidden_layers = ds_args.num_layers
110
+ config.num_attention_heads = ds_args.num_attention_heads
111
+ config.intermediate_size = ds_args.ffn_hidden_size if "ffn_hidden_size" in ds_args else 4 * ds_args.hidden_size
112
+ # pprint(config)
113
+
114
+ # The number of heads.
115
+ heads = config.num_attention_heads
116
+ # The hidden_size per head.
117
+ hidden_size_per_head = config.hidden_size // heads
118
+ # Megatron-LM checkpoint version
119
+ if "checkpoint_version" in input_state_dict.keys():
120
+ checkpoint_version = input_state_dict["checkpoint_version"]
121
+ else:
122
+ checkpoint_version = 0.0
123
+
124
+ # The model.
125
+ model = input_state_dict["model"]
126
+ # The language model.
127
+ lm = model["language_model"]
128
+ # The embeddings.
129
+ embeddings = lm["embedding"]
130
+
131
+ # The word embeddings.
132
+ word_embeddings = embeddings["word_embeddings"]["weight"]
133
+ # Truncate the embedding table to vocab_size rows.
134
+ word_embeddings = word_embeddings[: config.vocab_size, :]
135
+ # Store the word embeddings.
136
+ output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings
137
+
138
+ # The position embeddings.
139
+ pos_embeddings = embeddings["position_embeddings"]["weight"]
140
+ assert pos_embeddings.size(0) == config.max_position_embeddings and pos_embeddings.size(1) == config.hidden_size
141
+ # Store the position embeddings.
142
+ output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings
143
+
144
+ # The token-type embeddings.
145
+ tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"]
146
+ # Store the position embeddings.
147
+ output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings
148
+
149
+ # The transformer.
150
+ transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
151
+
152
+ # The regex to extract layer names.
153
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
154
+
155
+ # The simple map of names for "automated" rules.
156
+ megatron_to_transformers = {
157
+ "attention.dense": ".attention.output.dense.",
158
+ "self_attention.dense": ".attention.output.dense.",
159
+ "mlp.dense_h_to_4h": ".intermediate.dense.",
160
+ "mlp.dense_4h_to_h": ".output.dense.",
161
+ }
162
+
163
+ # Keep track of the attention/query/value tensor.
164
+ attention_qkv_weight = None
165
+
166
+ # Extract the layers.
167
+ for key, val in transformer.items():
168
+ # Match the name.
169
+ m = layer_re.match(key)
170
+
171
+ # Stop if that's not a layer
172
+ if m is None:
173
+ break
174
+
175
+ # The index of the layer.
176
+ layer_idx = int(m.group(1))
177
+ # The name of the operation.
178
+ op_name = m.group(2)
179
+ # Is it a weight or a bias?
180
+ weight_or_bias = m.group(3)
181
+
182
+ # The name of the layer.
183
+ layer_name = f"bert.encoder.layer.{layer_idx}"
184
+
185
+ # For layernorm(s), simply store the layer norm.
186
+ if op_name.endswith("layernorm"):
187
+ ln_name = "attention.ln" if op_name.startswith("input") else "ln"
188
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
189
+
190
+ # Transpose the QKV matrix.
191
+ elif (
192
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
193
+ ) and weight_or_bias == "weight":
194
+ # Make sure the QKV pointer is nil.
195
+ assert attention_qkv_weight is None, ""
196
+
197
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
198
+ # Store the tensor as we need the bias as well to interleave QKV and biases.
199
+ attention_qkv_weight = out_val
200
+
201
+ # Transpose the bias.
202
+ elif (
203
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
204
+ ) and weight_or_bias == "bias":
205
+ # Make sure we read the weight tensor.
206
+ assert attention_qkv_weight is not None, ""
207
+
208
+ # Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved.
209
+ q = attention_qkv_weight[0 * config.hidden_size : 1 * config.hidden_size, :]
210
+ k = attention_qkv_weight[1 * config.hidden_size : 2 * config.hidden_size, :]
211
+ v = attention_qkv_weight[2 * config.hidden_size : 3 * config.hidden_size, :]
212
+
213
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
214
+ # Split the bias.
215
+ q_bias = out_val[0 * config.hidden_size : 1 * config.hidden_size]
216
+ k_bias = out_val[1 * config.hidden_size : 2 * config.hidden_size]
217
+ v_bias = out_val[2 * config.hidden_size : 3 * config.hidden_size]
218
+
219
+ # Store.
220
+ output_state_dict[f"{layer_name}.attention.self.query.weight"] = q
221
+ output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias
222
+ output_state_dict[f"{layer_name}.attention.self.key.weight"] = k
223
+ output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias
224
+ output_state_dict[f"{layer_name}.attention.self.value.weight"] = v
225
+ output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias
226
+
227
+ # Clear the stored tensor.
228
+ attention_qkv_weight = None
229
+
230
+ # Copy weights and biases as is.
231
+ elif weight_or_bias in ["weight", "bias"]:
232
+ out_name = megatron_to_transformers[op_name]
233
+ output_state_dict[layer_name + out_name + weight_or_bias] = val
234
+
235
+ # The final layernorm.
236
+ output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"]
237
+ output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"]
238
+
239
+ # The pooler.
240
+ pooler = lm["pooler"]
241
+
242
+ # Store the matrix and the bias.
243
+ output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"]
244
+ output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"]
245
+
246
+ # The LM head from Megatron (for RACE).
247
+ lm_head = model["lm_head"]
248
+
249
+ # The transform matrix.
250
+ output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"]
251
+ output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"]
252
+
253
+ # The transform LN.
254
+ output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"]
255
+ output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"]
256
+
257
+ # For the decoder, we replicate the weights.
258
+ output_state_dict["cls.predictions.decoder.weight"] = word_embeddings
259
+ output_state_dict["cls.predictions.bias"] = lm_head["bias"]
260
+
261
+ # The classifier from Megatron (for MLNI).
262
+ binary_head = model["binary_head"]
263
+
264
+ # Store the classifier.
265
+ output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"]
266
+ output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"]
267
+
268
+ # It should be done!
269
+ return output_state_dict
270
+
271
+
272
+ ####################################################################################################
273
+
274
+
275
+ def main():
276
+ # Create the argument parser.
277
+ parser = argparse.ArgumentParser()
278
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
279
+ parser.add_argument("path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint")
280
+ parser.add_argument(
281
+ "--config_file",
282
+ default="",
283
+ type=str,
284
+ help="An optional config json file describing the pre-trained model.",
285
+ )
286
+ args = parser.parse_args()
287
+
288
+ # Extract the directory containing the checkpoint; the converted files are saved there.
289
+ basename = os.path.dirname(args.path_to_checkpoint)
290
+
291
+ # Load the model.
292
+ # the .zip is very optional, let's keep it for backward compatibility
293
+ print(f'Extracting PyTorch state dictionary from "{args.path_to_checkpoint}"')
294
+ if args.path_to_checkpoint.endswith(".zip"):
295
+ with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
296
+ with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
297
+ input_state_dict = torch.load(pytorch_dict, map_location="cpu")
298
+ else:
299
+ input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
300
+
301
+ if args.config_file == "":
302
+ # Default config of megatron-bert 345m
303
+ config = MegatronBertConfig()
304
+
305
+ # different megatron-bert-*-345m models have different vocab sizes, so override the default
306
+ # config (which is for megatron-bert-cased-345m) with the actual vocab dimension
307
+ config.vocab_size = input_state_dict["model"]["lm_head"]["bias"].numel()
308
+ else:
309
+ config = MegatronBertConfig.from_json_file(args.config_file)
310
+
311
+ # Convert.
312
+ print("Converting")
313
+ output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
314
+
315
+ # Print the structure of converted state dict.
316
+ if args.print_checkpoint_structure:
317
+ recursive_print(None, output_state_dict)
318
+
319
+ # Store the config to file.
320
+ print("Saving config")
321
+ config.save_pretrained(basename)
322
+
323
+ # Store the state_dict to file.
324
+ output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
325
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
326
+ torch.save(output_state_dict, output_checkpoint_file)
327
+
328
+
329
+ ####################################################################################################
330
+
331
+ if __name__ == "__main__":
332
+ main()
333
+
334
+ ####################################################################################################
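
The trickiest step in the conversion above is `fix_query_key_value_ordering`, which re-orders the fused query/key/value tensor so that the code that follows can slice it into contiguous Q, K and V blocks of `hidden_size` rows each. The sketch below replays the `checkpoint_version >= 2.0` branch on a toy tensor with made-up dimensions, purely to make the permutation visible; it is not part of the conversion script.

```python
import torch

num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
hidden_size = num_heads * head_dim

# Megatron >= 2.0 stores the fused tensor with layout [heads, (q, k, v), head_dim, cols],
# flattened over the first three axes.
fused = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
fused = fused.view(num_heads * num_splits * head_dim, cols)


def reorder_v2(param, num_splits, num_heads, head_dim):
    # Same permutation as the `checkpoint_version >= 2.0` branch above.
    input_shape = param.size()
    param = param.view((num_heads, num_splits, head_dim) + input_shape[1:])
    param = param.transpose(0, 1).contiguous()
    return param.view(*input_shape)


reordered = reorder_v2(fused, num_splits, num_heads, head_dim)

# After reordering, the first hidden_size rows hold Q for every head, the next
# hidden_size rows hold K, and the last hidden_size rows hold V.
q, k, v = reordered.split(hidden_size, dim=0)
print(q.shape, k.shape, v.shape)  # each torch.Size([8, 5])
```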
venv/lib/python3.10/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py ADDED
@@ -0,0 +1,1836 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch MegatronBERT model."""
17
+
18
+
19
+ import math
20
+ import os
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+
30
+ from ...activations import ACT2FN
31
+ from ...modeling_outputs import (
32
+ BaseModelOutputWithPastAndCrossAttentions,
33
+ BaseModelOutputWithPoolingAndCrossAttentions,
34
+ CausalLMOutputWithCrossAttentions,
35
+ MaskedLMOutput,
36
+ MultipleChoiceModelOutput,
37
+ NextSentencePredictorOutput,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutput,
40
+ TokenClassifierOutput,
41
+ )
42
+ from ...modeling_utils import PreTrainedModel
43
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
44
+ from ...utils import (
45
+ ModelOutput,
46
+ add_code_sample_docstrings,
47
+ add_start_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_megatron_bert import MegatronBertConfig
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CONFIG_FOR_DOC = "MegatronBertConfig"
58
+ _CHECKPOINT_FOR_DOC = "nvidia/megatron-bert-cased-345m"
59
+
60
+
61
+ from ..deprecated._archive_maps import MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
62
+
63
+
64
+ def load_tf_weights_in_megatron_bert(model, config, tf_checkpoint_path):
65
+ """Load tf checkpoints in a pytorch model."""
66
+ try:
67
+ import re
68
+
69
+ import numpy as np
70
+ import tensorflow as tf
71
+ except ImportError:
72
+ logger.error(
73
+ "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
74
+ "https://www.tensorflow.org/install/ for installation instructions."
75
+ )
76
+ raise
77
+ tf_path = os.path.abspath(tf_checkpoint_path)
78
+ logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
79
+ # Load weights from TF model
80
+ init_vars = tf.train.list_variables(tf_path)
81
+ names = []
82
+ arrays = []
83
+ for name, shape in init_vars:
84
+ logger.info(f"Loading TF weight {name} with shape {shape}")
85
+ array = tf.train.load_variable(tf_path, name)
86
+ names.append(name)
87
+ arrays.append(array)
88
+
89
+ for name, array in zip(names, arrays):
90
+ name = name.split("/")
91
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
92
+ # which are not required for using pretrained model
93
+ if any(
94
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
95
+ for n in name
96
+ ):
97
+ logger.info(f"Skipping {'/'.join(name)}")
98
+ continue
99
+ pointer = model
100
+ for m_name in name:
101
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
102
+ scope_names = re.split(r"_(\d+)", m_name)
103
+ else:
104
+ scope_names = [m_name]
105
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
106
+ pointer = getattr(pointer, "weight")
107
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
108
+ pointer = getattr(pointer, "bias")
109
+ elif scope_names[0] == "output_weights":
110
+ pointer = getattr(pointer, "weight")
111
+ elif scope_names[0] == "squad":
112
+ pointer = getattr(pointer, "classifier")
113
+ else:
114
+ try:
115
+ pointer = getattr(pointer, scope_names[0])
116
+ except AttributeError:
117
+ logger.info(f"Skipping {'/'.join(name)}")
118
+ continue
119
+ if len(scope_names) >= 2:
120
+ num = int(scope_names[1])
121
+ pointer = pointer[num]
122
+ if m_name[-11:] == "_embeddings":
123
+ pointer = getattr(pointer, "weight")
124
+ elif m_name == "kernel":
125
+ array = np.transpose(array)
126
+ if pointer.shape != array.shape:
127
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
128
+ logger.info("Initialize PyTorch weight {}".format(name))
129
+ pointer.data = torch.from_numpy(array)
130
+ return model
131
+
132
+
133
+ class MegatronBertEmbeddings(nn.Module):
134
+ """Construct the embeddings from word, position and token_type embeddings."""
135
+
136
+ def __init__(self, config):
137
+ super().__init__()
138
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
139
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
140
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
141
+
142
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
143
+ # any TensorFlow checkpoint file
144
+
145
+ # In Megatron, layer-norm is applied after the 1st dropout.
146
+ # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
147
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
148
+
149
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
150
+ self.register_buffer(
151
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
152
+ )
153
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
154
+
155
+ def forward(
156
+ self,
157
+ input_ids: Optional[torch.LongTensor] = None,
158
+ token_type_ids: Optional[torch.LongTensor] = None,
159
+ position_ids: Optional[torch.LongTensor] = None,
160
+ inputs_embeds: Optional[torch.LongTensor] = None,
161
+ past_key_values_length: int = 0,
162
+ ) -> torch.Tensor:
163
+ if input_ids is not None:
164
+ input_shape = input_ids.size()
165
+ else:
166
+ input_shape = inputs_embeds.size()[:-1]
167
+
168
+ seq_length = input_shape[1]
169
+
170
+ if position_ids is None:
171
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
172
+
173
+ if token_type_ids is None:
174
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
175
+
176
+ if inputs_embeds is None:
177
+ inputs_embeds = self.word_embeddings(input_ids)
178
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
179
+
180
+ embeddings = inputs_embeds + token_type_embeddings
181
+ if self.position_embedding_type == "absolute":
182
+ position_embeddings = self.position_embeddings(position_ids)
183
+ embeddings += position_embeddings
184
+
185
+ # Megatron BERT moves that layer norm after the drop-out (and to each layer).
186
+ # embeddings = self.LayerNorm(embeddings)
187
+ embeddings = self.dropout(embeddings)
188
+ return embeddings
189
+
190
+
191
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MegatronBert
192
+ class MegatronBertSelfAttention(nn.Module):
193
+ def __init__(self, config, position_embedding_type=None):
194
+ super().__init__()
195
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
196
+ raise ValueError(
197
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
198
+ f"heads ({config.num_attention_heads})"
199
+ )
200
+
201
+ self.num_attention_heads = config.num_attention_heads
202
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
203
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
204
+
205
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
206
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
207
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
208
+
209
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
210
+ self.position_embedding_type = position_embedding_type or getattr(
211
+ config, "position_embedding_type", "absolute"
212
+ )
213
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
214
+ self.max_position_embeddings = config.max_position_embeddings
215
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
216
+
217
+ self.is_decoder = config.is_decoder
218
+
219
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
220
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
221
+ x = x.view(new_x_shape)
222
+ return x.permute(0, 2, 1, 3)
223
+
224
+ def forward(
225
+ self,
226
+ hidden_states: torch.Tensor,
227
+ attention_mask: Optional[torch.FloatTensor] = None,
228
+ head_mask: Optional[torch.FloatTensor] = None,
229
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
230
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
231
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
232
+ output_attentions: Optional[bool] = False,
233
+ ) -> Tuple[torch.Tensor]:
234
+ mixed_query_layer = self.query(hidden_states)
235
+
236
+ # If this is instantiated as a cross-attention module, the keys
237
+ # and values come from an encoder; the attention mask needs to be
238
+ # such that the encoder's padding tokens are not attended to.
239
+ is_cross_attention = encoder_hidden_states is not None
240
+
241
+ if is_cross_attention and past_key_value is not None:
242
+ # reuse k,v, cross_attentions
243
+ key_layer = past_key_value[0]
244
+ value_layer = past_key_value[1]
245
+ attention_mask = encoder_attention_mask
246
+ elif is_cross_attention:
247
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
248
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
249
+ attention_mask = encoder_attention_mask
250
+ elif past_key_value is not None:
251
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
252
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
253
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
254
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
255
+ else:
256
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
257
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
258
+
259
+ query_layer = self.transpose_for_scores(mixed_query_layer)
260
+
261
+ use_cache = past_key_value is not None
262
+ if self.is_decoder:
263
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
264
+ # Further calls to cross_attention layer can then reuse all cross-attention
265
+ # key/value_states (first "if" case)
266
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
267
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
268
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
269
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
270
+ past_key_value = (key_layer, value_layer)
271
+
272
+ # Take the dot product between "query" and "key" to get the raw attention scores.
273
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
274
+
275
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
276
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
277
+ if use_cache:
278
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
279
+ -1, 1
280
+ )
281
+ else:
282
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
283
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
284
+ distance = position_ids_l - position_ids_r
285
+
286
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
287
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
288
+
289
+ if self.position_embedding_type == "relative_key":
290
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
291
+ attention_scores = attention_scores + relative_position_scores
292
+ elif self.position_embedding_type == "relative_key_query":
293
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
294
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
295
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
296
+
297
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
298
+ if attention_mask is not None:
299
+ # Apply the attention mask (precomputed for all layers in MegatronBertModel forward() function)
300
+ attention_scores = attention_scores + attention_mask
301
+
302
+ # Normalize the attention scores to probabilities.
303
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
304
+
305
+ # This is actually dropping out entire tokens to attend to, which might
306
+ # seem a bit unusual, but is taken from the original Transformer paper.
307
+ attention_probs = self.dropout(attention_probs)
308
+
309
+ # Mask heads if we want to
310
+ if head_mask is not None:
311
+ attention_probs = attention_probs * head_mask
312
+
313
+ context_layer = torch.matmul(attention_probs, value_layer)
314
+
315
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
316
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
317
+ context_layer = context_layer.view(new_context_layer_shape)
318
+
319
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
320
+
321
+ if self.is_decoder:
322
+ outputs = outputs + (past_key_value,)
323
+ return outputs
324
+
325
+
326
+ # Based on transformers.models.bert.modeling_bert.BertSelfOutput. Moved LayerNorm to MegatronBertAttention below.
327
+ class MegatronBertSelfOutput(nn.Module):
328
+ def __init__(self, config):
329
+ super().__init__()
330
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
331
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
332
+
333
+ def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
334
+ hidden_states = self.dense(hidden_states)
335
+ hidden_states = self.dropout(hidden_states)
336
+ return residual + hidden_states
337
+
338
+
339
+ # Based on transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm.
340
+ class MegatronBertAttention(nn.Module):
341
+ def __init__(self, config):
342
+ super().__init__()
343
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
344
+ self.self = MegatronBertSelfAttention(config)
345
+ self.output = MegatronBertSelfOutput(config)
346
+ self.pruned_heads = set()
347
+
348
+ def prune_heads(self, heads):
349
+ if len(heads) == 0:
350
+ return
351
+ heads, index = find_pruneable_heads_and_indices(
352
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
353
+ )
354
+
355
+ # Prune linear layers
356
+ self.self.query = prune_linear_layer(self.self.query, index)
357
+ self.self.key = prune_linear_layer(self.self.key, index)
358
+ self.self.value = prune_linear_layer(self.self.value, index)
359
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
360
+
361
+ # Update hyper params and store pruned heads
362
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
363
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
364
+ self.pruned_heads = self.pruned_heads.union(heads)
365
+
366
+ def forward(
367
+ self,
368
+ hidden_states: torch.Tensor,
369
+ attention_mask: Optional[torch.FloatTensor] = None,
370
+ head_mask: Optional[torch.FloatTensor] = None,
371
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
372
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
373
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
374
+ output_attentions: Optional[bool] = False,
375
+ ) -> Tuple[torch.Tensor]:
376
+ ln_outputs = self.ln(hidden_states)
377
+ self_outputs = self.self(
378
+ ln_outputs,
379
+ attention_mask,
380
+ head_mask,
381
+ encoder_hidden_states,
382
+ encoder_attention_mask,
383
+ past_key_value,
384
+ output_attentions,
385
+ )
386
+ attention_output = self.output(self_outputs[0], hidden_states)
387
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
388
+ return outputs
389
+
390
+
391
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->MegatronBert
392
+ class MegatronBertIntermediate(nn.Module):
393
+ def __init__(self, config):
394
+ super().__init__()
395
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
396
+ if isinstance(config.hidden_act, str):
397
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
398
+ else:
399
+ self.intermediate_act_fn = config.hidden_act
400
+
401
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
402
+ hidden_states = self.dense(hidden_states)
403
+ hidden_states = self.intermediate_act_fn(hidden_states)
404
+ return hidden_states
405
+
406
+
407
+ # Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to MegatronBertLayer below.
408
+ class MegatronBertOutput(nn.Module):
409
+ def __init__(self, config):
410
+ super().__init__()
411
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
412
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
413
+
414
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
415
+ hidden_states = self.dense(hidden_states)
416
+ hidden_states = self.dropout(hidden_states)
417
+ return input_tensor + hidden_states
418
+
419
+
420
+ # Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm.
421
+ class MegatronBertLayer(nn.Module):
422
+ def __init__(self, config):
423
+ super().__init__()
424
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
425
+ self.seq_len_dim = 1
426
+ self.attention = MegatronBertAttention(config)
427
+ self.is_decoder = config.is_decoder
428
+ self.add_cross_attention = config.add_cross_attention
429
+ if self.add_cross_attention:
430
+ if not self.is_decoder:
431
+ raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
432
+ self.crossattention = MegatronBertAttention(config)
433
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
434
+ self.intermediate = MegatronBertIntermediate(config)
435
+ self.output = MegatronBertOutput(config)
436
+
437
+ def forward(
438
+ self,
439
+ hidden_states: torch.Tensor,
440
+ attention_mask: Optional[torch.FloatTensor] = None,
441
+ head_mask: Optional[torch.FloatTensor] = None,
442
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
443
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
444
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
445
+ output_attentions: Optional[bool] = False,
446
+ ) -> Tuple[torch.Tensor]:
447
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
448
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
449
+ self_attention_outputs = self.attention(
450
+ hidden_states,
451
+ attention_mask,
452
+ head_mask,
453
+ output_attentions=output_attentions,
454
+ past_key_value=self_attn_past_key_value,
455
+ )
456
+ attention_output = self_attention_outputs[0]
457
+
458
+ # if decoder, the last output is tuple of self-attn cache
459
+ if self.is_decoder:
460
+ outputs = self_attention_outputs[1:-1]
461
+ present_key_value = self_attention_outputs[-1]
462
+ else:
463
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
464
+
465
+ cross_attn_present_key_value = None
466
+ if self.is_decoder and encoder_hidden_states is not None:
467
+ if not hasattr(self, "crossattention"):
468
+ raise AttributeError(
469
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
470
+ " by setting `config.add_cross_attention=True`"
471
+ )
472
+
473
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
474
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
475
+ cross_attention_outputs = self.crossattention(
476
+ attention_output,
477
+ attention_mask,
478
+ head_mask,
479
+ encoder_hidden_states,
480
+ encoder_attention_mask,
481
+ cross_attn_past_key_value,
482
+ output_attentions,
483
+ )
484
+ attention_output = cross_attention_outputs[0]
485
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
486
+
487
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
488
+ cross_attn_present_key_value = cross_attention_outputs[-1]
489
+ present_key_value = present_key_value + cross_attn_present_key_value
490
+
491
+ layer_output = apply_chunking_to_forward(
492
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
493
+ )
494
+ outputs = (layer_output,) + outputs
495
+
496
+ # if decoder, return the attn key/values as the last output
497
+ if self.is_decoder:
498
+ outputs = outputs + (present_key_value,)
499
+
500
+ return outputs
501
+
502
+ def feed_forward_chunk(self, attention_output):
503
+ ln_output = self.ln(attention_output)
504
+ intermediate_output = self.intermediate(ln_output)
505
+ layer_output = self.output(intermediate_output, attention_output)
506
+ return layer_output
507
+
508
+
509
+ class MegatronBertEncoder(nn.Module):
510
+ def __init__(self, config):
511
+ super().__init__()
512
+ self.config = config
513
+ self.layer = nn.ModuleList([MegatronBertLayer(config) for _ in range(config.num_hidden_layers)])
514
+
515
+ # The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one
516
+ # is simply the final LN (Transformer's BERT has it attached to each hidden layer).
517
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
518
+ self.gradient_checkpointing = False
519
+
520
+ def forward(
521
+ self,
522
+ hidden_states: torch.Tensor,
523
+ attention_mask: Optional[torch.FloatTensor] = None,
524
+ head_mask: Optional[torch.FloatTensor] = None,
525
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
526
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
527
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
528
+ use_cache: Optional[bool] = None,
529
+ output_attentions: Optional[bool] = False,
530
+ output_hidden_states: Optional[bool] = False,
531
+ return_dict: Optional[bool] = True,
532
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
533
+ if self.gradient_checkpointing and self.training:
534
+ if use_cache:
535
+ logger.warning_once(
536
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
537
+ )
538
+ use_cache = False
539
+ all_hidden_states = () if output_hidden_states else None
540
+ all_self_attentions = () if output_attentions else None
541
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
542
+
543
+ next_decoder_cache = () if use_cache else None
544
+ for i, layer_module in enumerate(self.layer):
545
+ if output_hidden_states:
546
+ all_hidden_states = all_hidden_states + (hidden_states,)
547
+
548
+ layer_head_mask = head_mask[i] if head_mask is not None else None
549
+ past_key_value = past_key_values[i] if past_key_values is not None else None
550
+
551
+ if self.gradient_checkpointing and self.training:
552
+ layer_outputs = self._gradient_checkpointing_func(
553
+ layer_module.__call__,
554
+ hidden_states,
555
+ attention_mask,
556
+ layer_head_mask,
557
+ encoder_hidden_states,
558
+ encoder_attention_mask,
559
+ past_key_value,
560
+ output_attentions,
561
+ )
562
+ else:
563
+ layer_outputs = layer_module(
564
+ hidden_states,
565
+ attention_mask,
566
+ layer_head_mask,
567
+ encoder_hidden_states,
568
+ encoder_attention_mask,
569
+ past_key_value,
570
+ output_attentions,
571
+ )
572
+
573
+ # Because we moved the layer norm to the end of the hidden layer, the data here is not
574
+ # normalized. If that is really needed, we must apply LN here to match Transformer's BERT.
575
+
576
+ hidden_states = layer_outputs[0]
577
+ if use_cache:
578
+ next_decoder_cache += (layer_outputs[-1],)
579
+ if output_attentions:
580
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
581
+ if self.config.add_cross_attention:
582
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
583
+
584
+ # Finalize the hidden states.
585
+ hidden_states = self.ln(hidden_states)
586
+
587
+ if output_hidden_states:
588
+ all_hidden_states = all_hidden_states + (hidden_states,)
589
+
590
+ if not return_dict:
591
+ return tuple(
592
+ v
593
+ for v in [
594
+ hidden_states,
595
+ next_decoder_cache,
596
+ all_hidden_states,
597
+ all_self_attentions,
598
+ all_cross_attentions,
599
+ ]
600
+ if v is not None
601
+ )
602
+ return BaseModelOutputWithPastAndCrossAttentions(
603
+ last_hidden_state=hidden_states,
604
+ past_key_values=next_decoder_cache,
605
+ hidden_states=all_hidden_states,
606
+ attentions=all_self_attentions,
607
+ cross_attentions=all_cross_attentions,
608
+ )
609
+
610
+
611
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->MegatronBert
612
+ class MegatronBertPooler(nn.Module):
613
+ def __init__(self, config):
614
+ super().__init__()
615
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
616
+ self.activation = nn.Tanh()
617
+
618
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
619
+ # We "pool" the model by simply taking the hidden state corresponding
620
+ # to the first token.
621
+ first_token_tensor = hidden_states[:, 0]
622
+ pooled_output = self.dense(first_token_tensor)
623
+ pooled_output = self.activation(pooled_output)
624
+ return pooled_output
625
+
626
+
627
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MegatronBert
628
+ class MegatronBertPredictionHeadTransform(nn.Module):
629
+ def __init__(self, config):
630
+ super().__init__()
631
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
632
+ if isinstance(config.hidden_act, str):
633
+ self.transform_act_fn = ACT2FN[config.hidden_act]
634
+ else:
635
+ self.transform_act_fn = config.hidden_act
636
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
637
+
638
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
639
+ hidden_states = self.dense(hidden_states)
640
+ hidden_states = self.transform_act_fn(hidden_states)
641
+ hidden_states = self.LayerNorm(hidden_states)
642
+ return hidden_states
643
+
644
+
645
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MegatronBert
646
+ class MegatronBertLMPredictionHead(nn.Module):
647
+ def __init__(self, config):
648
+ super().__init__()
649
+ self.transform = MegatronBertPredictionHeadTransform(config)
650
+
651
+ # The output weights are the same as the input embeddings, but there is
652
+ # an output-only bias for each token.
653
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
654
+
655
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
656
+
657
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
658
+ self.decoder.bias = self.bias
659
+
660
+ def forward(self, hidden_states):
661
+ hidden_states = self.transform(hidden_states)
662
+ hidden_states = self.decoder(hidden_states)
663
+ return hidden_states
664
+
665
+
666
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MegatronBert
667
+ class MegatronBertOnlyMLMHead(nn.Module):
668
+ def __init__(self, config):
669
+ super().__init__()
670
+ self.predictions = MegatronBertLMPredictionHead(config)
671
+
672
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
673
+ prediction_scores = self.predictions(sequence_output)
674
+ return prediction_scores
675
+
676
+
677
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->MegatronBert
678
+ class MegatronBertOnlyNSPHead(nn.Module):
679
+ def __init__(self, config):
680
+ super().__init__()
681
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
682
+
683
+ def forward(self, pooled_output):
684
+ seq_relationship_score = self.seq_relationship(pooled_output)
685
+ return seq_relationship_score
686
+
687
+
688
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->MegatronBert
689
+ class MegatronBertPreTrainingHeads(nn.Module):
690
+ def __init__(self, config):
691
+ super().__init__()
692
+ self.predictions = MegatronBertLMPredictionHead(config)
693
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
694
+
695
+ def forward(self, sequence_output, pooled_output):
696
+ prediction_scores = self.predictions(sequence_output)
697
+ seq_relationship_score = self.seq_relationship(pooled_output)
698
+ return prediction_scores, seq_relationship_score
699
+
700
+
701
+ class MegatronBertPreTrainedModel(PreTrainedModel):
702
+ """
703
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
704
+ models.
705
+ """
706
+
707
+ config_class = MegatronBertConfig
708
+ load_tf_weights = load_tf_weights_in_megatron_bert
709
+ base_model_prefix = "bert"
710
+ supports_gradient_checkpointing = True
711
+
712
+ def _init_weights(self, module):
713
+ """Initialize the weights"""
714
+ if isinstance(module, (nn.Linear, nn.Embedding)):
715
+ # Slightly different from the TF version which uses truncated_normal for initialization
716
+ # cf https://github.com/pytorch/pytorch/pull/5617
717
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
718
+ elif isinstance(module, nn.LayerNorm):
719
+ module.bias.data.zero_()
720
+ module.weight.data.fill_(1.0)
721
+ if isinstance(module, nn.Linear) and module.bias is not None:
722
+ module.bias.data.zero_()
723
+
724
+
725
+ @dataclass
726
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->MegatronBert
727
+ class MegatronBertForPreTrainingOutput(ModelOutput):
728
+ """
729
+ Output type of [`MegatronBertForPreTraining`].
730
+
731
+ Args:
732
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
733
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
734
+ (classification) loss.
735
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
736
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
737
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
738
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
739
+ before SoftMax).
740
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
741
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
742
+ shape `(batch_size, sequence_length, hidden_size)`.
743
+
744
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
745
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
746
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
747
+ sequence_length)`.
748
+
749
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
750
+ heads.
751
+ """
752
+
753
+ loss: Optional[torch.FloatTensor] = None
754
+ prediction_logits: torch.FloatTensor = None
755
+ seq_relationship_logits: torch.FloatTensor = None
756
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
757
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
758
+
759
+
760
+ MEGATRON_BERT_START_DOCSTRING = r"""
761
+
762
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
763
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
764
+ etc.)
765
+
766
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
767
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
768
+ and behavior.
769
+
770
+ Parameters:
771
+ config ([`MegatronBertConfig`]): Model configuration class with all the parameters of the model.
772
+ Initializing with a config file does not load the weights associated with the model, only the
773
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
774
+ """
775
+
776
+ MEGATRON_BERT_INPUTS_DOCSTRING = r"""
777
+ Args:
778
+ input_ids (`torch.LongTensor` of shape `({0})`):
779
+ Indices of input sequence tokens in the vocabulary.
780
+
781
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
782
+ [`PreTrainedTokenizer.__call__`] for details.
783
+
784
+ [What are input IDs?](../glossary#input-ids)
785
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
786
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
787
+
788
+ - 1 for tokens that are **not masked**,
789
+ - 0 for tokens that are **masked**.
790
+
791
+ [What are attention masks?](../glossary#attention-mask)
792
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
793
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
794
+ 1]`:
795
+
796
+ - 0 corresponds to a *sentence A* token,
797
+ - 1 corresponds to a *sentence B* token.
798
+
799
+ [What are token type IDs?](../glossary#token-type-ids)
800
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
801
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
802
+ config.max_position_embeddings - 1]`.
803
+
804
+ [What are position IDs?](../glossary#position-ids)
805
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
806
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
807
+
808
+ - 1 indicates the head is **not masked**,
809
+ - 0 indicates the head is **masked**.
810
+
811
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
812
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
813
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
814
+ model's internal embedding lookup matrix.
815
+ output_attentions (`bool`, *optional*):
816
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
817
+ tensors for more detail.
818
+ output_hidden_states (`bool`, *optional*):
819
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
820
+ more detail.
821
+ return_dict (`bool`, *optional*):
822
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
823
+ """
824
+
825
+
826
+ @add_start_docstrings(
827
+ "The bare MegatronBert Model transformer outputting raw hidden-states without any specific head on top.",
828
+ MEGATRON_BERT_START_DOCSTRING,
829
+ )
830
+ class MegatronBertModel(MegatronBertPreTrainedModel):
831
+ """
832
+
833
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
834
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
835
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
836
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
837
+
838
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
839
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
840
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
841
+ """
842
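+ # Illustrative sketch (not part of the upstream file; the checkpoint name and exact kwargs are
+ # only an example): configuring the model as a decoder with cross-attention might look like
+ #
+ #     config = MegatronBertConfig.from_pretrained(
+ #         "nvidia/megatron-bert-cased-345m", is_decoder=True, add_cross_attention=True
+ #     )
+ #     model = MegatronBertModel.from_pretrained("nvidia/megatron-bert-cased-345m", config=config)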
+
843
+ def __init__(self, config, add_pooling_layer=True):
844
+ super().__init__(config)
845
+ self.config = config
846
+
847
+ self.embeddings = MegatronBertEmbeddings(config)
848
+ self.encoder = MegatronBertEncoder(config)
849
+
850
+ self.pooler = MegatronBertPooler(config) if add_pooling_layer else None
851
+
852
+ # Initialize weights and apply final processing
853
+ self.post_init()
854
+
855
+ def get_input_embeddings(self):
856
+ return self.embeddings.word_embeddings
857
+
858
+ def set_input_embeddings(self, value):
859
+ self.embeddings.word_embeddings = value
860
+
861
+ def _prune_heads(self, heads_to_prune):
862
+ """
863
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
864
+ class PreTrainedModel
865
+ """
866
+ for layer, heads in heads_to_prune.items():
867
+ self.encoder.layer[layer].attention.prune_heads(heads)
868
+
869
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
870
+ @add_code_sample_docstrings(
871
+ checkpoint=_CHECKPOINT_FOR_DOC,
872
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
873
+ config_class=_CONFIG_FOR_DOC,
874
+ )
875
+ def forward(
876
+ self,
877
+ input_ids: Optional[torch.LongTensor] = None,
878
+ attention_mask: Optional[torch.FloatTensor] = None,
879
+ token_type_ids: Optional[torch.LongTensor] = None,
880
+ position_ids: Optional[torch.LongTensor] = None,
881
+ head_mask: Optional[torch.FloatTensor] = None,
882
+ inputs_embeds: Optional[torch.FloatTensor] = None,
883
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
884
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
885
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
886
+ use_cache: Optional[bool] = None,
887
+ output_attentions: Optional[bool] = None,
888
+ output_hidden_states: Optional[bool] = None,
889
+ return_dict: Optional[bool] = None,
890
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
891
+ r"""
892
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
893
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
894
+ the model is configured as a decoder.
895
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
896
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
897
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
898
+
899
+ - 1 for tokens that are **not masked**,
900
+ - 0 for tokens that are **masked**.
901
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
902
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
903
+
904
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
905
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
906
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
907
+ use_cache (`bool`, *optional*):
908
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
909
+ `past_key_values`).
910
+ """
911
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
912
+ output_hidden_states = (
913
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
914
+ )
915
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
916
+
917
+ if self.config.is_decoder:
918
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
919
+ else:
920
+ use_cache = False
921
+
922
+ if input_ids is not None and inputs_embeds is not None:
923
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
924
+ elif input_ids is not None:
925
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
926
+ input_shape = input_ids.size()
927
+ elif inputs_embeds is not None:
928
+ input_shape = inputs_embeds.size()[:-1]
929
+ else:
930
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
931
+
932
+ batch_size, seq_length = input_shape
933
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
934
+
935
+ # past_key_values_length
936
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
937
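+ # Note: each cached key/value tensor has shape (batch_size, num_heads, past_seq_len, head_dim),
+ # so dim 2 gives the number of positions already processed.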
+
938
+ if attention_mask is None:
939
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
940
+ if token_type_ids is None:
941
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
942
+
943
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
944
+ # ourselves in which case we just need to make it broadcastable to all heads.
945
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
946
+
947
+ # If a 2D or 3D attention mask is provided for the cross-attention
948
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
949
+ if self.config.is_decoder and encoder_hidden_states is not None:
950
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
951
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
952
+ if encoder_attention_mask is None:
953
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
954
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
955
+ else:
956
+ encoder_extended_attention_mask = None
957
+
958
+ # Prepare head mask if needed
959
+ # 1.0 in head_mask indicate we keep the head
960
+ # attention_probs has shape bsz x n_heads x N x N
961
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
962
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
963
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
964
+
965
+ embedding_output = self.embeddings(
966
+ input_ids=input_ids,
967
+ position_ids=position_ids,
968
+ token_type_ids=token_type_ids,
969
+ inputs_embeds=inputs_embeds,
970
+ past_key_values_length=past_key_values_length,
971
+ )
972
+ encoder_outputs = self.encoder(
973
+ embedding_output,
974
+ attention_mask=extended_attention_mask,
975
+ head_mask=head_mask,
976
+ encoder_hidden_states=encoder_hidden_states,
977
+ encoder_attention_mask=encoder_extended_attention_mask,
978
+ past_key_values=past_key_values,
979
+ use_cache=use_cache,
980
+ output_attentions=output_attentions,
981
+ output_hidden_states=output_hidden_states,
982
+ return_dict=return_dict,
983
+ )
984
+ sequence_output = encoder_outputs[0]
985
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
986
+
987
+ if not return_dict:
988
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
989
+
990
+ return BaseModelOutputWithPoolingAndCrossAttentions(
991
+ last_hidden_state=sequence_output,
992
+ pooler_output=pooled_output,
993
+ past_key_values=encoder_outputs.past_key_values,
994
+ hidden_states=encoder_outputs.hidden_states,
995
+ attentions=encoder_outputs.attentions,
996
+ cross_attentions=encoder_outputs.cross_attentions,
997
+ )
998
+
999
+
1000
+ @add_start_docstrings(
1001
+ """
1002
+ MegatronBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
1003
+ `next sentence prediction (classification)` head.
1004
+ """,
1005
+ MEGATRON_BERT_START_DOCSTRING,
1006
+ )
1007
+ class MegatronBertForPreTraining(MegatronBertPreTrainedModel):
1008
+ _tied_weights_keys = ["cls.predictions.decoder"]
1009
+
1010
+ def __init__(self, config, add_binary_head=True):
1011
+ super().__init__(config)
1012
+
1013
+ self.bert = MegatronBertModel(config)
1014
+ self.cls = MegatronBertPreTrainingHeads(config)
1015
+
1016
+ # Initialize weights and apply final processing
1017
+ self.post_init()
1018
+
1019
+ def get_output_embeddings(self):
1020
+ return self.cls.predictions.decoder
1021
+
1022
+ def set_output_embeddings(self, new_embeddings):
1023
+ self.cls.predictions.decoder = new_embeddings
1024
+
1025
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1026
+ @replace_return_docstrings(output_type=MegatronBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1027
+ def forward(
1028
+ self,
1029
+ input_ids: Optional[torch.LongTensor] = None,
1030
+ attention_mask: Optional[torch.FloatTensor] = None,
1031
+ token_type_ids: Optional[torch.LongTensor] = None,
1032
+ position_ids: Optional[torch.LongTensor] = None,
1033
+ head_mask: Optional[torch.FloatTensor] = None,
1034
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1035
+ labels: Optional[torch.LongTensor] = None,
1036
+ next_sentence_label: Optional[torch.LongTensor] = None,
1037
+ output_attentions: Optional[bool] = None,
1038
+ output_hidden_states: Optional[bool] = None,
1039
+ return_dict: Optional[bool] = None,
1040
+ ) -> Union[Tuple, MegatronBertForPreTrainingOutput]:
1041
+ r"""
1042
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1043
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1044
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1045
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1046
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1047
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1048
+ (see `input_ids` docstring) Indices should be in `[0, 1]`:
1049
+
1050
+ - 0 indicates sequence B is a continuation of sequence A,
1051
+ - 1 indicates sequence B is a random sequence.
1052
+ kwargs (`Dict[str, any]`, *optional*, defaults to *{}*):
1053
+ Used to hide legacy arguments that have been deprecated.
1054
+
1055
+ Returns:
1056
+
1057
+ Example:
1058
+
1059
+ ```python
1060
+ >>> from transformers import AutoTokenizer, MegatronBertForPreTraining
1061
+ >>> import torch
1062
+
1063
+ >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
1064
+ >>> model = MegatronBertForPreTraining.from_pretrained("nvidia/megatron-bert-cased-345m")
1065
+
1066
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1067
+ >>> outputs = model(**inputs)
1068
+
1069
+ >>> prediction_logits = outputs.prediction_logits
1070
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1071
+ ```"""
1072
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1073
+
1074
+ outputs = self.bert(
1075
+ input_ids,
1076
+ attention_mask=attention_mask,
1077
+ token_type_ids=token_type_ids,
1078
+ position_ids=position_ids,
1079
+ head_mask=head_mask,
1080
+ inputs_embeds=inputs_embeds,
1081
+ output_attentions=output_attentions,
1082
+ output_hidden_states=output_hidden_states,
1083
+ return_dict=return_dict,
1084
+ )
1085
+
1086
+ sequence_output, pooled_output = outputs[:2]
1087
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1088
+
1089
+ total_loss = None
1090
+ if labels is not None and next_sentence_label is not None:
1091
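+ # Note: the pre-training loss is the sum of the masked-LM cross-entropy over the vocabulary
+ # and the binary next-sentence cross-entropy.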
+ loss_fct = CrossEntropyLoss()
1092
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1093
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1094
+ total_loss = masked_lm_loss + next_sentence_loss
1095
+
1096
+ if not return_dict:
1097
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1098
+ return ((total_loss,) + output) if total_loss is not None else output
1099
+
1100
+ return MegatronBertForPreTrainingOutput(
1101
+ loss=total_loss,
1102
+ prediction_logits=prediction_scores,
1103
+ seq_relationship_logits=seq_relationship_score,
1104
+ hidden_states=outputs.hidden_states,
1105
+ attentions=outputs.attentions,
1106
+ )
1107
+
1108
+
1109
+ @add_start_docstrings(
1110
+ """MegatronBert Model with a `language modeling` head on top for CLM fine-tuning.""",
1111
+ MEGATRON_BERT_START_DOCSTRING,
1112
+ )
1113
+ class MegatronBertForCausalLM(MegatronBertPreTrainedModel):
1114
+ _tied_weights_keys = ["cls.predictions.decoder"]
1115
+
1116
+ def __init__(self, config):
1117
+ super().__init__(config)
1118
+
1119
+ if not config.is_decoder:
1120
+ logger.warning("If you want to use `MegatronBertForCausalLM` as a standalone, add `is_decoder=True.`")
1121
+
1122
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1123
+ self.cls = MegatronBertOnlyMLMHead(config)
1124
+
1125
+ # Initialize weights and apply final processing
1126
+ self.post_init()
1127
+
1128
+ def get_output_embeddings(self):
1129
+ return self.cls.predictions.decoder
1130
+
1131
+ def set_output_embeddings(self, new_embeddings):
1132
+ self.cls.predictions.decoder = new_embeddings
1133
+
1134
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1135
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1136
+ def forward(
1137
+ self,
1138
+ input_ids: Optional[torch.LongTensor] = None,
1139
+ attention_mask: Optional[torch.FloatTensor] = None,
1140
+ token_type_ids: Optional[torch.LongTensor] = None,
1141
+ position_ids: Optional[torch.LongTensor] = None,
1142
+ head_mask: Optional[torch.FloatTensor] = None,
1143
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1144
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1145
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1146
+ labels: Optional[torch.LongTensor] = None,
1147
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1148
+ use_cache: Optional[bool] = None,
1149
+ output_attentions: Optional[bool] = None,
1150
+ output_hidden_states: Optional[bool] = None,
1151
+ return_dict: Optional[bool] = None,
1152
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1153
+ r"""
1154
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1155
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1156
+ the model is configured as a decoder.
1157
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1158
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1159
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1160
+
1161
+ - 1 for tokens that are **not masked**,
1162
+ - 0 for tokens that are **masked**.
1163
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1164
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1165
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1166
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1167
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1168
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1169
+
1170
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1171
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1172
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1173
+ use_cache (`bool`, *optional*):
1174
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1175
+ `past_key_values`).
1176
+
1177
+ Returns:
1178
+
1179
+ Example:
1180
+
1181
+ ```python
1182
+ >>> from transformers import AutoTokenizer, MegatronBertForCausalLM, MegatronBertConfig
1183
+ >>> import torch
1184
+
1185
+ >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
1186
+ >>> model = MegatronBertForCausalLM.from_pretrained("nvidia/megatron-bert-cased-345m", is_decoder=True)
1187
+
1188
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1189
+ >>> outputs = model(**inputs)
1190
+
1191
+ >>> prediction_logits = outputs.logits
1192
+ ```"""
1193
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1194
+ if labels is not None:
1195
+ use_cache = False
1196
+
1197
+ outputs = self.bert(
1198
+ input_ids,
1199
+ attention_mask=attention_mask,
1200
+ token_type_ids=token_type_ids,
1201
+ position_ids=position_ids,
1202
+ head_mask=head_mask,
1203
+ inputs_embeds=inputs_embeds,
1204
+ encoder_hidden_states=encoder_hidden_states,
1205
+ encoder_attention_mask=encoder_attention_mask,
1206
+ past_key_values=past_key_values,
1207
+ use_cache=use_cache,
1208
+ output_attentions=output_attentions,
1209
+ output_hidden_states=output_hidden_states,
1210
+ return_dict=return_dict,
1211
+ )
1212
+
1213
+ sequence_output = outputs[0]
1214
+ prediction_scores = self.cls(sequence_output)
1215
+
1216
+ lm_loss = None
1217
+ if labels is not None:
1218
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1219
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1220
+ labels = labels[:, 1:].contiguous()
1221
+ loss_fct = CrossEntropyLoss()
1222
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1223
+
1224
+ if not return_dict:
1225
+ output = (prediction_scores,) + outputs[2:]
1226
+ return ((lm_loss,) + output) if lm_loss is not None else output
1227
+
1228
+ return CausalLMOutputWithCrossAttentions(
1229
+ loss=lm_loss,
1230
+ logits=prediction_scores,
1231
+ past_key_values=outputs.past_key_values,
1232
+ hidden_states=outputs.hidden_states,
1233
+ attentions=outputs.attentions,
1234
+ cross_attentions=outputs.cross_attentions,
1235
+ )
1236
+
1237
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1238
+ input_shape = input_ids.shape
1239
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1240
+ if attention_mask is None:
1241
+ attention_mask = input_ids.new_ones(input_shape)
1242
+
1243
+ # cut decoder_input_ids if past_key_values is used
1244
+ if past_key_values is not None:
1245
+ past_length = past_key_values[0][0].shape[2]
1246
+
1247
+ # Some generation methods already pass only the last input ID
1248
+ if input_ids.shape[1] > past_length:
1249
+ remove_prefix_length = past_length
1250
+ else:
1251
+ # Default to old behavior: keep only final ID
1252
+ remove_prefix_length = input_ids.shape[1] - 1
1253
+
1254
+ input_ids = input_ids[:, remove_prefix_length:]
1255
+
1256
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1257
+
1258
+ def _reorder_cache(self, past_key_values, beam_idx):
1259
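+ # Note: during beam search, re-order every layer's cached key/value tensors along the batch
+ # dimension so the cache tracks the beam hypotheses selected at this step.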
+ reordered_past = ()
1260
+ for layer_past in past_key_values:
1261
+ reordered_past += (
1262
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1263
+ )
1264
+ return reordered_past
1265
+
1266
+
1267
+ @add_start_docstrings("""MegatronBert Model with a `language modeling` head on top.""", MEGATRON_BERT_START_DOCSTRING)
1268
+ class MegatronBertForMaskedLM(MegatronBertPreTrainedModel):
1269
+ _tied_weights_keys = ["cls.predictions.decoder"]
1270
+
1271
+ def __init__(self, config):
1272
+ super().__init__(config)
1273
+
1274
+ if config.is_decoder:
1275
+ logger.warning(
1276
+ "If you want to use `MegatronBertForMaskedLM` make sure `config.is_decoder=False` for "
1277
+ "bi-directional self-attention."
1278
+ )
1279
+
1280
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1281
+ self.cls = MegatronBertOnlyMLMHead(config)
1282
+
1283
+ # Initialize weights and apply final processing
1284
+ self.post_init()
1285
+
1286
+ def get_output_embeddings(self):
1287
+ return self.cls.predictions.decoder
1288
+
1289
+ def set_output_embeddings(self, new_embeddings):
1290
+ self.cls.predictions.decoder = new_embeddings
1291
+
1292
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1293
+ @add_code_sample_docstrings(
1294
+ checkpoint=_CHECKPOINT_FOR_DOC,
1295
+ output_type=MaskedLMOutput,
1296
+ config_class=_CONFIG_FOR_DOC,
1297
+ )
1298
+ def forward(
1299
+ self,
1300
+ input_ids: Optional[torch.LongTensor] = None,
1301
+ attention_mask: Optional[torch.FloatTensor] = None,
1302
+ token_type_ids: Optional[torch.LongTensor] = None,
1303
+ position_ids: Optional[torch.LongTensor] = None,
1304
+ head_mask: Optional[torch.FloatTensor] = None,
1305
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1306
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1307
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1308
+ labels: Optional[torch.LongTensor] = None,
1309
+ output_attentions: Optional[bool] = None,
1310
+ output_hidden_states: Optional[bool] = None,
1311
+ return_dict: Optional[bool] = None,
1312
+ ) -> Union[Tuple, MaskedLMOutput]:
1313
+ r"""
1314
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1315
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1316
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1317
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1318
+ """
1319
+
1320
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1321
+
1322
+ outputs = self.bert(
1323
+ input_ids,
1324
+ attention_mask=attention_mask,
1325
+ token_type_ids=token_type_ids,
1326
+ position_ids=position_ids,
1327
+ head_mask=head_mask,
1328
+ inputs_embeds=inputs_embeds,
1329
+ encoder_hidden_states=encoder_hidden_states,
1330
+ encoder_attention_mask=encoder_attention_mask,
1331
+ output_attentions=output_attentions,
1332
+ output_hidden_states=output_hidden_states,
1333
+ return_dict=return_dict,
1334
+ )
1335
+
1336
+ sequence_output = outputs[0]
1337
+ prediction_scores = self.cls(sequence_output)
1338
+
1339
+ masked_lm_loss = None
1340
+ if labels is not None:
1341
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1342
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1343
+
1344
+ if not return_dict:
1345
+ output = (prediction_scores,) + outputs[2:]
1346
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1347
+
1348
+ return MaskedLMOutput(
1349
+ loss=masked_lm_loss,
1350
+ logits=prediction_scores,
1351
+ hidden_states=outputs.hidden_states,
1352
+ attentions=outputs.attentions,
1353
+ )
1354
+
1355
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1356
+ input_shape = input_ids.shape
1357
+ effective_batch_size = input_shape[0]
1358
+
1359
+ # add a dummy token
1360
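+ # Note: a PAD token is appended so there is always a position to predict, and the attention
+ # mask is extended with a zero so that dummy token is never attended to.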
+ if self.config.pad_token_id is None:
1361
+ raise ValueError("The PAD token should be defined for generation")
1362
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1363
+ dummy_token = torch.full(
1364
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1365
+ )
1366
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1367
+
1368
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1369
+
1370
+
1371
+ @add_start_docstrings(
1372
+ """MegatronBert Model with a `next sentence prediction (classification)` head on top.""",
1373
+ MEGATRON_BERT_START_DOCSTRING,
1374
+ )
1375
+ class MegatronBertForNextSentencePrediction(MegatronBertPreTrainedModel):
1376
+ def __init__(self, config):
1377
+ super().__init__(config)
1378
+
1379
+ self.bert = MegatronBertModel(config)
1380
+ self.cls = MegatronBertOnlyNSPHead(config)
1381
+
1382
+ # Initialize weights and apply final processing
1383
+ self.post_init()
1384
+
1385
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1386
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1387
+ def forward(
1388
+ self,
1389
+ input_ids: Optional[torch.LongTensor] = None,
1390
+ attention_mask: Optional[torch.FloatTensor] = None,
1391
+ token_type_ids: Optional[torch.LongTensor] = None,
1392
+ position_ids: Optional[torch.LongTensor] = None,
1393
+ head_mask: Optional[torch.FloatTensor] = None,
1394
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1395
+ labels: Optional[torch.LongTensor] = None,
1396
+ output_attentions: Optional[bool] = None,
1397
+ output_hidden_states: Optional[bool] = None,
1398
+ return_dict: Optional[bool] = None,
1399
+ **kwargs,
1400
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
1401
+ r"""
1402
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1403
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1404
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1405
+
1406
+ - 0 indicates sequence B is a continuation of sequence A,
1407
+ - 1 indicates sequence B is a random sequence.
1408
+
1409
+ Returns:
1410
+
1411
+ Example:
1412
+
1413
+ ```python
1414
+ >>> from transformers import AutoTokenizer, MegatronBertForNextSentencePrediction
1415
+ >>> import torch
1416
+
1417
+ >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
1418
+ >>> model = MegatronBertForNextSentencePrediction.from_pretrained("nvidia/megatron-bert-cased-345m")
1419
+
1420
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1421
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1422
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1423
+
1424
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1425
+ >>> logits = outputs.logits
1426
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1427
+ ```"""
1428
+
1429
+ if "next_sentence_label" in kwargs:
1430
+ warnings.warn(
1431
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1432
+ " `labels` instead.",
1433
+ FutureWarning,
1434
+ )
1435
+ labels = kwargs.pop("next_sentence_label")
1436
+
1437
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1438
+
1439
+ outputs = self.bert(
1440
+ input_ids,
1441
+ attention_mask=attention_mask,
1442
+ token_type_ids=token_type_ids,
1443
+ position_ids=position_ids,
1444
+ head_mask=head_mask,
1445
+ inputs_embeds=inputs_embeds,
1446
+ output_attentions=output_attentions,
1447
+ output_hidden_states=output_hidden_states,
1448
+ return_dict=return_dict,
1449
+ )
1450
+
1451
+ pooled_output = outputs[1]
1452
+
1453
+ seq_relationship_scores = self.cls(pooled_output)
1454
+
1455
+ next_sentence_loss = None
1456
+ if labels is not None:
1457
+ loss_fct = CrossEntropyLoss()
1458
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1459
+
1460
+ if not return_dict:
1461
+ output = (seq_relationship_scores,) + outputs[2:]
1462
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1463
+
1464
+ return NextSentencePredictorOutput(
1465
+ loss=next_sentence_loss,
1466
+ logits=seq_relationship_scores,
1467
+ hidden_states=outputs.hidden_states,
1468
+ attentions=outputs.attentions,
1469
+ )
1470
+
1471
+
1472
+ @add_start_docstrings(
1473
+ """
1474
+ MegatronBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1475
+ pooled output) e.g. for GLUE tasks.
1476
+ """,
1477
+ MEGATRON_BERT_START_DOCSTRING,
1478
+ )
1479
+ class MegatronBertForSequenceClassification(MegatronBertPreTrainedModel):
1480
+ def __init__(self, config):
1481
+ super().__init__(config)
1482
+ self.num_labels = config.num_labels
1483
+
1484
+ self.bert = MegatronBertModel(config)
1485
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1486
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1487
+
1488
+ # Initialize weights and apply final processing
1489
+ self.post_init()
1490
+
1491
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1492
+ @add_code_sample_docstrings(
1493
+ checkpoint=_CHECKPOINT_FOR_DOC,
1494
+ output_type=SequenceClassifierOutput,
1495
+ config_class=_CONFIG_FOR_DOC,
1496
+ )
1497
+ def forward(
1498
+ self,
1499
+ input_ids: Optional[torch.LongTensor] = None,
1500
+ attention_mask: Optional[torch.FloatTensor] = None,
1501
+ token_type_ids: Optional[torch.LongTensor] = None,
1502
+ position_ids: Optional[torch.LongTensor] = None,
1503
+ head_mask: Optional[torch.FloatTensor] = None,
1504
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1505
+ labels: Optional[torch.LongTensor] = None,
1506
+ output_attentions: Optional[bool] = None,
1507
+ output_hidden_states: Optional[bool] = None,
1508
+ return_dict: Optional[bool] = None,
1509
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1510
+ r"""
1511
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1512
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1513
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1514
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1515
+ """
1516
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1517
+
1518
+ outputs = self.bert(
1519
+ input_ids,
1520
+ attention_mask=attention_mask,
1521
+ token_type_ids=token_type_ids,
1522
+ position_ids=position_ids,
1523
+ head_mask=head_mask,
1524
+ inputs_embeds=inputs_embeds,
1525
+ output_attentions=output_attentions,
1526
+ output_hidden_states=output_hidden_states,
1527
+ return_dict=return_dict,
1528
+ )
1529
+
1530
+ pooled_output = outputs[1]
1531
+
1532
+ pooled_output = self.dropout(pooled_output)
1533
+ logits = self.classifier(pooled_output)
1534
+
1535
+ loss = None
1536
+ if labels is not None:
1537
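+ # Note: when `problem_type` is unset it is inferred from the labels: one label -> regression,
+ # integer labels -> single-label classification, otherwise multi-label classification.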
+ if self.config.problem_type is None:
1538
+ if self.num_labels == 1:
1539
+ self.config.problem_type = "regression"
1540
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1541
+ self.config.problem_type = "single_label_classification"
1542
+ else:
1543
+ self.config.problem_type = "multi_label_classification"
1544
+
1545
+ if self.config.problem_type == "regression":
1546
+ loss_fct = MSELoss()
1547
+ if self.num_labels == 1:
1548
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1549
+ else:
1550
+ loss = loss_fct(logits, labels)
1551
+ elif self.config.problem_type == "single_label_classification":
1552
+ loss_fct = CrossEntropyLoss()
1553
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1554
+ elif self.config.problem_type == "multi_label_classification":
1555
+ loss_fct = BCEWithLogitsLoss()
1556
+ loss = loss_fct(logits, labels)
1557
+ if not return_dict:
1558
+ output = (logits,) + outputs[2:]
1559
+ return ((loss,) + output) if loss is not None else output
1560
+
1561
+ return SequenceClassifierOutput(
1562
+ loss=loss,
1563
+ logits=logits,
1564
+ hidden_states=outputs.hidden_states,
1565
+ attentions=outputs.attentions,
1566
+ )
1567
+
1568
+
1569
+ @add_start_docstrings(
1570
+ """
1571
+ MegatronBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output
1572
+ and a softmax) e.g. for RocStories/SWAG tasks.
1573
+ """,
1574
+ MEGATRON_BERT_START_DOCSTRING,
1575
+ )
1576
+ class MegatronBertForMultipleChoice(MegatronBertPreTrainedModel):
1577
+ def __init__(self, config):
1578
+ super().__init__(config)
1579
+
1580
+ self.bert = MegatronBertModel(config)
1581
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1582
+ self.classifier = nn.Linear(config.hidden_size, 1)
1583
+
1584
+ # Initialize weights and apply final processing
1585
+ self.post_init()
1586
+
1587
+ @add_start_docstrings_to_model_forward(
1588
+ MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1589
+ )
1590
+ @add_code_sample_docstrings(
1591
+ checkpoint=_CHECKPOINT_FOR_DOC,
1592
+ output_type=MultipleChoiceModelOutput,
1593
+ config_class=_CONFIG_FOR_DOC,
1594
+ )
1595
+ def forward(
1596
+ self,
1597
+ input_ids: Optional[torch.LongTensor] = None,
1598
+ attention_mask: Optional[torch.FloatTensor] = None,
1599
+ token_type_ids: Optional[torch.LongTensor] = None,
1600
+ position_ids: Optional[torch.LongTensor] = None,
1601
+ head_mask: Optional[torch.FloatTensor] = None,
1602
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1603
+ labels: Optional[torch.LongTensor] = None,
1604
+ output_attentions: Optional[bool] = None,
1605
+ output_hidden_states: Optional[bool] = None,
1606
+ return_dict: Optional[bool] = None,
1607
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1608
+ r"""
1609
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1610
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1611
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1612
+ `input_ids` above)
1613
+ """
1614
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1615
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1616
+
1617
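+ # Note: inputs of shape (batch_size, num_choices, ...) are flattened to
+ # (batch_size * num_choices, ...) so each choice is scored independently; the logits are
+ # reshaped back to (batch_size, num_choices) further below.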
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1618
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1619
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1620
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1621
+ inputs_embeds = (
1622
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1623
+ if inputs_embeds is not None
1624
+ else None
1625
+ )
1626
+
1627
+ outputs = self.bert(
1628
+ input_ids,
1629
+ attention_mask=attention_mask,
1630
+ token_type_ids=token_type_ids,
1631
+ position_ids=position_ids,
1632
+ head_mask=head_mask,
1633
+ inputs_embeds=inputs_embeds,
1634
+ output_attentions=output_attentions,
1635
+ output_hidden_states=output_hidden_states,
1636
+ return_dict=return_dict,
1637
+ )
1638
+
1639
+ pooled_output = outputs[1]
1640
+
1641
+ pooled_output = self.dropout(pooled_output)
1642
+ logits = self.classifier(pooled_output)
1643
+ reshaped_logits = logits.view(-1, num_choices)
1644
+
1645
+ loss = None
1646
+ if labels is not None:
1647
+ loss_fct = CrossEntropyLoss()
1648
+ loss = loss_fct(reshaped_logits, labels)
1649
+
1650
+ if not return_dict:
1651
+ output = (reshaped_logits,) + outputs[2:]
1652
+ return ((loss,) + output) if loss is not None else output
1653
+
1654
+ return MultipleChoiceModelOutput(
1655
+ loss=loss,
1656
+ logits=reshaped_logits,
1657
+ hidden_states=outputs.hidden_states,
1658
+ attentions=outputs.attentions,
1659
+ )
1660
+
1661
+
1662
+ @add_start_docstrings(
1663
+ """
1664
+ MegatronBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1665
+ for Named-Entity-Recognition (NER) tasks.
1666
+ """,
1667
+ MEGATRON_BERT_START_DOCSTRING,
1668
+ )
1669
+ class MegatronBertForTokenClassification(MegatronBertPreTrainedModel):
1670
+ def __init__(self, config):
1671
+ super().__init__(config)
1672
+ self.num_labels = config.num_labels
1673
+
1674
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1675
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1676
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1677
+
1678
+ # Initialize weights and apply final processing
1679
+ self.post_init()
1680
+
1681
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1682
+ @add_code_sample_docstrings(
1683
+ checkpoint=_CHECKPOINT_FOR_DOC,
1684
+ output_type=TokenClassifierOutput,
1685
+ config_class=_CONFIG_FOR_DOC,
1686
+ )
1687
+ def forward(
1688
+ self,
1689
+ input_ids: Optional[torch.LongTensor] = None,
1690
+ attention_mask: Optional[torch.FloatTensor] = None,
1691
+ token_type_ids: Optional[torch.LongTensor] = None,
1692
+ position_ids: Optional[torch.LongTensor] = None,
1693
+ head_mask: Optional[torch.FloatTensor] = None,
1694
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1695
+ labels: Optional[torch.LongTensor] = None,
1696
+ output_attentions: Optional[bool] = None,
1697
+ output_hidden_states: Optional[bool] = None,
1698
+ return_dict: Optional[bool] = None,
1699
+ ) -> Union[Tuple, TokenClassifierOutput]:
1700
+ r"""
1701
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1702
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1703
+ """
1704
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1705
+
1706
+ outputs = self.bert(
1707
+ input_ids,
1708
+ attention_mask=attention_mask,
1709
+ token_type_ids=token_type_ids,
1710
+ position_ids=position_ids,
1711
+ head_mask=head_mask,
1712
+ inputs_embeds=inputs_embeds,
1713
+ output_attentions=output_attentions,
1714
+ output_hidden_states=output_hidden_states,
1715
+ return_dict=return_dict,
1716
+ )
1717
+
1718
+ sequence_output = outputs[0]
1719
+
1720
+ sequence_output = self.dropout(sequence_output)
1721
+ logits = self.classifier(sequence_output)
1722
+
1723
+ loss = None
1724
+ if labels is not None:
1725
+ loss_fct = CrossEntropyLoss()
1726
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1727
+
1728
+ if not return_dict:
1729
+ output = (logits,) + outputs[2:]
1730
+ return ((loss,) + output) if loss is not None else output
1731
+
1732
+ return TokenClassifierOutput(
1733
+ loss=loss,
1734
+ logits=logits,
1735
+ hidden_states=outputs.hidden_states,
1736
+ attentions=outputs.attentions,
1737
+ )
1738
+
1739
+
1740
+ @add_start_docstrings(
1741
+ """
1742
+ MegatronBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1743
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1744
+ """,
1745
+ MEGATRON_BERT_START_DOCSTRING,
1746
+ )
1747
+ class MegatronBertForQuestionAnswering(MegatronBertPreTrainedModel):
1748
+ def __init__(self, config):
1749
+ super().__init__(config)
1750
+ self.num_labels = config.num_labels
1751
+
1752
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1753
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1754
+
1755
+ # Initialize weights and apply final processing
1756
+ self.post_init()
1757
+
1758
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1759
+ @add_code_sample_docstrings(
1760
+ checkpoint=_CHECKPOINT_FOR_DOC,
1761
+ output_type=QuestionAnsweringModelOutput,
1762
+ config_class=_CONFIG_FOR_DOC,
1763
+ )
1764
+ def forward(
1765
+ self,
1766
+ input_ids: Optional[torch.LongTensor] = None,
1767
+ attention_mask: Optional[torch.FloatTensor] = None,
1768
+ token_type_ids: Optional[torch.LongTensor] = None,
1769
+ position_ids: Optional[torch.LongTensor] = None,
1770
+ head_mask: Optional[torch.FloatTensor] = None,
1771
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1772
+ start_positions: Optional[torch.LongTensor] = None,
1773
+ end_positions: Optional[torch.LongTensor] = None,
1774
+ output_attentions: Optional[bool] = None,
1775
+ output_hidden_states: Optional[bool] = None,
1776
+ return_dict: Optional[bool] = None,
1777
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1778
+ r"""
1779
+        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
1788
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1789
+
1790
+ outputs = self.bert(
1791
+ input_ids,
1792
+ attention_mask=attention_mask,
1793
+ token_type_ids=token_type_ids,
1794
+ position_ids=position_ids,
1795
+ head_mask=head_mask,
1796
+ inputs_embeds=inputs_embeds,
1797
+ output_attentions=output_attentions,
1798
+ output_hidden_states=output_hidden_states,
1799
+ return_dict=return_dict,
1800
+ )
1801
+
1802
+ sequence_output = outputs[0]
1803
+
1804
+ logits = self.qa_outputs(sequence_output)
1805
+ start_logits, end_logits = logits.split(1, dim=-1)
1806
+ start_logits = start_logits.squeeze(-1).contiguous()
1807
+ end_logits = end_logits.squeeze(-1).contiguous()
1808
+
1809
+ total_loss = None
1810
+ if start_positions is not None and end_positions is not None:
1811
+            # If we are on multi-GPU, the split adds a dimension, so squeeze it away
+            if len(start_positions.size()) > 1:
+                start_positions = start_positions.squeeze(-1)
+            if len(end_positions.size()) > 1:
+                end_positions = end_positions.squeeze(-1)
+            # Sometimes the start/end positions fall outside the model inputs; ignore those terms in the loss
+            ignored_index = start_logits.size(1)
+            start_positions = start_positions.clamp(0, ignored_index)
+            end_positions = end_positions.clamp(0, ignored_index)
+
+            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+            start_loss = loss_fct(start_logits, start_positions)
+            end_loss = loss_fct(end_logits, end_positions)
+            total_loss = (start_loss + end_loss) / 2
+
+        if not return_dict:
+            output = (start_logits, end_logits) + outputs[2:]
+            return ((total_loss,) + output) if total_loss is not None else output
+
+        return QuestionAnsweringModelOutput(
+            loss=total_loss,
+            start_logits=start_logits,
+            end_logits=end_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
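
For orientation, a minimal usage sketch for the question-answering head shown in the diff above. It is not part of the commit: the checkpoint path is a placeholder for a MegatronBERT checkpoint already converted to the Hugging Face format, and the greedy span decoding at the end is just one simple way to read off the prediction.

import torch
from transformers import AutoTokenizer, MegatronBertForQuestionAnswering

# Placeholder path: assumes a locally converted MegatronBERT checkpoint.
checkpoint = "path/to/converted-megatron-bert"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = MegatronBertForQuestionAnswering.from_pretrained(checkpoint)

question = "What does the span head predict?"
context = "The question-answering head predicts the start and end positions of the answer span."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Greedy span decoding: take the argmax of the start and end logits.
start_idx = int(outputs.start_logits.argmax(dim=-1))
end_idx = int(outputs.end_logits.argmax(dim=-1))
answer = tokenizer.decode(inputs["input_ids"][0, start_idx : end_idx + 1])
print(answer)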