applied-ai-018 commited on
Commit
132bf9e
·
verified ·
1 Parent(s): 7b2af3c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__init__.py +56 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/configuration_dinat.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/modeling_dinat.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/configuration_dinat.py +152 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/modeling_dinat.py +976 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__init__.py +103 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/__init__.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/configuration_flaubert.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_flaubert.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_tf_flaubert.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/tokenization_flaubert.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/configuration_flaubert.py +234 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/modeling_flaubert.py +1302 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/modeling_tf_flaubert.py +1337 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/tokenization_flaubert.py +565 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__init__.py +112 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py +218 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py +718 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py +1427 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py +1099 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__init__.py +57 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/configuration_llava.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/convert_llava_weights_to_hf.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/modeling_llava.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/processing_llava.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py +156 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/convert_llava_weights_to_hf.py +148 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/modeling_llava.py +572 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/llava/processing_llava.py +135 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/convert_musicgen_transformers.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py +258 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py +62 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py +148 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py +1378 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/tapas/__init__.py +95 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__init__.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_dinat": ["DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DinatConfig"]}
20
+
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_dinat"] = [
29
+ "DINAT_PRETRAINED_MODEL_ARCHIVE_LIST",
30
+ "DinatForImageClassification",
31
+ "DinatModel",
32
+ "DinatPreTrainedModel",
33
+ "DinatBackbone",
34
+ ]
35
+
36
+ if TYPE_CHECKING:
37
+ from .configuration_dinat import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP, DinatConfig
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ from .modeling_dinat import (
46
+ DINAT_PRETRAINED_MODEL_ARCHIVE_LIST,
47
+ DinatBackbone,
48
+ DinatForImageClassification,
49
+ DinatModel,
50
+ DinatPreTrainedModel,
51
+ )
52
+
53
+ else:
54
+ import sys
55
+
56
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (966 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/configuration_dinat.cpython-310.pyc ADDED
Binary file (6.57 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/modeling_dinat.cpython-310.pyc ADDED
Binary file (32.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/configuration_dinat.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Dilated Neighborhood Attention Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class DinatConfig(BackboneConfigMixin, PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat
31
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
32
+ defaults will yield a similar configuration to that of the Dinat
33
+ [shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ patch_size (`int`, *optional*, defaults to 4):
40
+ The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
41
+ num_channels (`int`, *optional*, defaults to 3):
42
+ The number of input channels.
43
+ embed_dim (`int`, *optional*, defaults to 64):
44
+ Dimensionality of patch embedding.
45
+ depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
46
+ Number of layers in each level of the encoder.
47
+ num_heads (`List[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
48
+ Number of attention heads in each layer of the Transformer encoder.
49
+ kernel_size (`int`, *optional*, defaults to 7):
50
+ Neighborhood Attention kernel size.
51
+ dilations (`List[List[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`):
52
+ Dilation value of each NA layer in the Transformer encoder.
53
+ mlp_ratio (`float`, *optional*, defaults to 3.0):
54
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
55
+ qkv_bias (`bool`, *optional*, defaults to `True`):
56
+ Whether or not a learnable bias should be added to the queries, keys and values.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
58
+ The dropout probability for all fully connected layers in the embeddings and encoder.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio for the attention probabilities.
61
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
62
+ Stochastic depth rate.
63
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
64
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
65
+ `"selu"` and `"gelu_new"` are supported.
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
69
+ The epsilon used by the layer normalization layers.
70
+ layer_scale_init_value (`float`, *optional*, defaults to 0.0):
71
+ The initial value for the layer scale. Disabled if <=0.
72
+ out_features (`List[str]`, *optional*):
73
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
74
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
75
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
76
+ same order as defined in the `stage_names` attribute.
77
+ out_indices (`List[int]`, *optional*):
78
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
79
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
80
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
81
+ same order as defined in the `stage_names` attribute.
82
+
83
+ Example:
84
+
85
+ ```python
86
+ >>> from transformers import DinatConfig, DinatModel
87
+
88
+ >>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration
89
+ >>> configuration = DinatConfig()
90
+
91
+ >>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration
92
+ >>> model = DinatModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "dinat"
99
+
100
+ attribute_map = {
101
+ "num_attention_heads": "num_heads",
102
+ "num_hidden_layers": "num_layers",
103
+ }
104
+
105
+ def __init__(
106
+ self,
107
+ patch_size=4,
108
+ num_channels=3,
109
+ embed_dim=64,
110
+ depths=[3, 4, 6, 5],
111
+ num_heads=[2, 4, 8, 16],
112
+ kernel_size=7,
113
+ dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
114
+ mlp_ratio=3.0,
115
+ qkv_bias=True,
116
+ hidden_dropout_prob=0.0,
117
+ attention_probs_dropout_prob=0.0,
118
+ drop_path_rate=0.1,
119
+ hidden_act="gelu",
120
+ initializer_range=0.02,
121
+ layer_norm_eps=1e-5,
122
+ layer_scale_init_value=0.0,
123
+ out_features=None,
124
+ out_indices=None,
125
+ **kwargs,
126
+ ):
127
+ super().__init__(**kwargs)
128
+
129
+ self.patch_size = patch_size
130
+ self.num_channels = num_channels
131
+ self.embed_dim = embed_dim
132
+ self.depths = depths
133
+ self.num_layers = len(depths)
134
+ self.num_heads = num_heads
135
+ self.kernel_size = kernel_size
136
+ self.dilations = dilations
137
+ self.mlp_ratio = mlp_ratio
138
+ self.qkv_bias = qkv_bias
139
+ self.hidden_dropout_prob = hidden_dropout_prob
140
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
141
+ self.drop_path_rate = drop_path_rate
142
+ self.hidden_act = hidden_act
143
+ self.layer_norm_eps = layer_norm_eps
144
+ self.initializer_range = initializer_range
145
+ # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
146
+ # this indicates the channel dimension after the last stage of the model
147
+ self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
148
+ self.layer_scale_init_value = layer_scale_init_value
149
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
150
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
151
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
152
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/dinat/modeling_dinat.py ADDED
@@ -0,0 +1,976 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Dilated Neighborhood Attention Transformer model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BackboneOutput
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
31
+ from ...utils import (
32
+ ModelOutput,
33
+ OptionalDependencyNotAvailable,
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ is_natten_available,
38
+ logging,
39
+ replace_return_docstrings,
40
+ requires_backends,
41
+ )
42
+ from ...utils.backbone_utils import BackboneMixin
43
+ from .configuration_dinat import DinatConfig
44
+
45
+
46
+ if is_natten_available():
47
+ from natten.functional import natten2dav, natten2dqkrpb
48
+ else:
49
+
50
+ def natten2dqkrpb(*args, **kwargs):
51
+ raise OptionalDependencyNotAvailable()
52
+
53
+ def natten2dav(*args, **kwargs):
54
+ raise OptionalDependencyNotAvailable()
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+
59
+ # General docstring
60
+ _CONFIG_FOR_DOC = "DinatConfig"
61
+
62
+ # Base docstring
63
+ _CHECKPOINT_FOR_DOC = "shi-labs/dinat-mini-in1k-224"
64
+ _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512]
65
+
66
+ # Image classification docstring
67
+ _IMAGE_CLASS_CHECKPOINT = "shi-labs/dinat-mini-in1k-224"
68
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
69
+
70
+
71
+ from ..deprecated._archive_maps import DINAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
72
+
73
+
74
+ # drop_path and DinatDropPath are from the timm library.
75
+
76
+
77
+ @dataclass
78
+ # Copied from transformers.models.nat.modeling_nat.NatEncoderOutput with Nat->Dinat
79
+ class DinatEncoderOutput(ModelOutput):
80
+ """
81
+ Dinat encoder's outputs, with potential hidden states and attentions.
82
+
83
+ Args:
84
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
85
+ Sequence of hidden-states at the output of the last layer of the model.
86
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
87
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
88
+ shape `(batch_size, sequence_length, hidden_size)`.
89
+
90
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
91
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
92
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
93
+ sequence_length)`.
94
+
95
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
96
+ heads.
97
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
98
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
99
+ shape `(batch_size, hidden_size, height, width)`.
100
+
101
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
102
+ include the spatial dimensions.
103
+ """
104
+
105
+ last_hidden_state: torch.FloatTensor = None
106
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
107
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
108
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
109
+
110
+
111
+ @dataclass
112
+ # Copied from transformers.models.nat.modeling_nat.NatModelOutput with Nat->Dinat
113
+ class DinatModelOutput(ModelOutput):
114
+ """
115
+ Dinat model's outputs that also contains a pooling of the last hidden states.
116
+
117
+ Args:
118
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
119
+ Sequence of hidden-states at the output of the last layer of the model.
120
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
121
+ Average pooling of the last layer hidden-state.
122
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
123
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
124
+ shape `(batch_size, sequence_length, hidden_size)`.
125
+
126
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
127
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
128
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
129
+ sequence_length)`.
130
+
131
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
132
+ heads.
133
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
134
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
135
+ shape `(batch_size, hidden_size, height, width)`.
136
+
137
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
138
+ include the spatial dimensions.
139
+ """
140
+
141
+ last_hidden_state: torch.FloatTensor = None
142
+ pooler_output: Optional[torch.FloatTensor] = None
143
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
144
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
145
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
146
+
147
+
148
+ @dataclass
149
+ # Copied from transformers.models.nat.modeling_nat.NatImageClassifierOutput with Nat->Dinat
150
+ class DinatImageClassifierOutput(ModelOutput):
151
+ """
152
+ Dinat outputs for image classification.
153
+
154
+ Args:
155
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
156
+ Classification (or regression if config.num_labels==1) loss.
157
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
158
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
159
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
160
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
161
+ shape `(batch_size, sequence_length, hidden_size)`.
162
+
163
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
164
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
165
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
166
+ sequence_length)`.
167
+
168
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
169
+ heads.
170
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
171
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
172
+ shape `(batch_size, hidden_size, height, width)`.
173
+
174
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
175
+ include the spatial dimensions.
176
+ """
177
+
178
+ loss: Optional[torch.FloatTensor] = None
179
+ logits: torch.FloatTensor = None
180
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
181
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
182
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
183
+
184
+
185
+ # Copied from transformers.models.nat.modeling_nat.NatEmbeddings with Nat->Dinat
186
+ class DinatEmbeddings(nn.Module):
187
+ """
188
+ Construct the patch and position embeddings.
189
+ """
190
+
191
+ def __init__(self, config):
192
+ super().__init__()
193
+
194
+ self.patch_embeddings = DinatPatchEmbeddings(config)
195
+
196
+ self.norm = nn.LayerNorm(config.embed_dim)
197
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
198
+
199
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
200
+ embeddings = self.patch_embeddings(pixel_values)
201
+ embeddings = self.norm(embeddings)
202
+
203
+ embeddings = self.dropout(embeddings)
204
+
205
+ return embeddings
206
+
207
+
208
+ # Copied from transformers.models.nat.modeling_nat.NatPatchEmbeddings with Nat->Dinat
209
+ class DinatPatchEmbeddings(nn.Module):
210
+ """
211
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
212
+ `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
213
+ Transformer.
214
+ """
215
+
216
+ def __init__(self, config):
217
+ super().__init__()
218
+ patch_size = config.patch_size
219
+ num_channels, hidden_size = config.num_channels, config.embed_dim
220
+ self.num_channels = num_channels
221
+
222
+ if patch_size == 4:
223
+ pass
224
+ else:
225
+ # TODO: Support arbitrary patch sizes.
226
+ raise ValueError("Dinat only supports patch size of 4 at the moment.")
227
+
228
+ self.projection = nn.Sequential(
229
+ nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
230
+ nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
231
+ )
232
+
233
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
234
+ _, num_channels, height, width = pixel_values.shape
235
+ if num_channels != self.num_channels:
236
+ raise ValueError(
237
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
238
+ )
239
+ embeddings = self.projection(pixel_values)
240
+ embeddings = embeddings.permute(0, 2, 3, 1)
241
+
242
+ return embeddings
243
+
244
+
245
+ # Copied from transformers.models.nat.modeling_nat.NatDownsampler with Nat->Dinat
246
+ class DinatDownsampler(nn.Module):
247
+ """
248
+ Convolutional Downsampling Layer.
249
+
250
+ Args:
251
+ dim (`int`):
252
+ Number of input channels.
253
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
254
+ Normalization layer class.
255
+ """
256
+
257
+ def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
258
+ super().__init__()
259
+ self.dim = dim
260
+ self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
261
+ self.norm = norm_layer(2 * dim)
262
+
263
+ def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
264
+ input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
265
+ input_feature = self.norm(input_feature)
266
+ return input_feature
267
+
268
+
269
+ # Copied from transformers.models.beit.modeling_beit.drop_path
270
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
271
+ """
272
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
273
+
274
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
275
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
276
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
277
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
278
+ argument.
279
+ """
280
+ if drop_prob == 0.0 or not training:
281
+ return input
282
+ keep_prob = 1 - drop_prob
283
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
284
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
285
+ random_tensor.floor_() # binarize
286
+ output = input.div(keep_prob) * random_tensor
287
+ return output
288
+
289
+
290
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Dinat
291
+ class DinatDropPath(nn.Module):
292
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
293
+
294
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
295
+ super().__init__()
296
+ self.drop_prob = drop_prob
297
+
298
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
299
+ return drop_path(hidden_states, self.drop_prob, self.training)
300
+
301
+ def extra_repr(self) -> str:
302
+ return "p={}".format(self.drop_prob)
303
+
304
+
305
+ class NeighborhoodAttention(nn.Module):
306
+ def __init__(self, config, dim, num_heads, kernel_size, dilation):
307
+ super().__init__()
308
+ if dim % num_heads != 0:
309
+ raise ValueError(
310
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
311
+ )
312
+
313
+ self.num_attention_heads = num_heads
314
+ self.attention_head_size = int(dim / num_heads)
315
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
316
+ self.kernel_size = kernel_size
317
+ self.dilation = dilation
318
+
319
+ # rpb is learnable relative positional biases; same concept is used Swin.
320
+ self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))
321
+
322
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
323
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
324
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
325
+
326
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
327
+
328
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttention.transpose_for_scores with Nat->Dinat
329
+ def transpose_for_scores(self, x):
330
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
331
+ x = x.view(new_x_shape)
332
+ return x.permute(0, 3, 1, 2, 4)
333
+
334
+ def forward(
335
+ self,
336
+ hidden_states: torch.Tensor,
337
+ output_attentions: Optional[bool] = False,
338
+ ) -> Tuple[torch.Tensor]:
339
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
340
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
341
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
342
+
343
+ # Apply the scale factor before computing attention weights. It's usually more efficient because
344
+ # attention weights are typically a bigger tensor compared to query.
345
+ # It gives identical results because scalars are commutable in matrix multiplication.
346
+ query_layer = query_layer / math.sqrt(self.attention_head_size)
347
+
348
+ # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
349
+ attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, self.dilation)
350
+
351
+ # Normalize the attention scores to probabilities.
352
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
353
+
354
+ # This is actually dropping out entire tokens to attend to, which might
355
+ # seem a bit unusual, but is taken from the original Transformer paper.
356
+ attention_probs = self.dropout(attention_probs)
357
+
358
+ context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, self.dilation)
359
+ context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
360
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
361
+ context_layer = context_layer.view(new_context_layer_shape)
362
+
363
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
364
+
365
+ return outputs
366
+
367
+
368
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionOutput
369
+ class NeighborhoodAttentionOutput(nn.Module):
370
+ def __init__(self, config, dim):
371
+ super().__init__()
372
+ self.dense = nn.Linear(dim, dim)
373
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
374
+
375
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
376
+ hidden_states = self.dense(hidden_states)
377
+ hidden_states = self.dropout(hidden_states)
378
+
379
+ return hidden_states
380
+
381
+
382
+ class NeighborhoodAttentionModule(nn.Module):
383
+ def __init__(self, config, dim, num_heads, kernel_size, dilation):
384
+ super().__init__()
385
+ self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size, dilation)
386
+ self.output = NeighborhoodAttentionOutput(config, dim)
387
+ self.pruned_heads = set()
388
+
389
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionModule.prune_heads
390
+ def prune_heads(self, heads):
391
+ if len(heads) == 0:
392
+ return
393
+ heads, index = find_pruneable_heads_and_indices(
394
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
395
+ )
396
+
397
+ # Prune linear layers
398
+ self.self.query = prune_linear_layer(self.self.query, index)
399
+ self.self.key = prune_linear_layer(self.self.key, index)
400
+ self.self.value = prune_linear_layer(self.self.value, index)
401
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
402
+
403
+ # Update hyper params and store pruned heads
404
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
405
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
406
+ self.pruned_heads = self.pruned_heads.union(heads)
407
+
408
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionModule.forward
409
+ def forward(
410
+ self,
411
+ hidden_states: torch.Tensor,
412
+ output_attentions: Optional[bool] = False,
413
+ ) -> Tuple[torch.Tensor]:
414
+ self_outputs = self.self(hidden_states, output_attentions)
415
+ attention_output = self.output(self_outputs[0], hidden_states)
416
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
417
+ return outputs
418
+
419
+
420
+ # Copied from transformers.models.nat.modeling_nat.NatIntermediate with Nat->Dinat
421
+ class DinatIntermediate(nn.Module):
422
+ def __init__(self, config, dim):
423
+ super().__init__()
424
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
425
+ if isinstance(config.hidden_act, str):
426
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
427
+ else:
428
+ self.intermediate_act_fn = config.hidden_act
429
+
430
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
431
+ hidden_states = self.dense(hidden_states)
432
+ hidden_states = self.intermediate_act_fn(hidden_states)
433
+ return hidden_states
434
+
435
+
436
+ # Copied from transformers.models.nat.modeling_nat.NatOutput with Nat->Dinat
437
+ class DinatOutput(nn.Module):
438
+ def __init__(self, config, dim):
439
+ super().__init__()
440
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
441
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
442
+
443
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
444
+ hidden_states = self.dense(hidden_states)
445
+ hidden_states = self.dropout(hidden_states)
446
+ return hidden_states
447
+
448
+
449
+ class DinatLayer(nn.Module):
450
+ def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0):
451
+ super().__init__()
452
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
453
+ self.kernel_size = config.kernel_size
454
+ self.dilation = dilation
455
+ self.window_size = self.kernel_size * self.dilation
456
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
457
+ self.attention = NeighborhoodAttentionModule(
458
+ config, dim, num_heads, kernel_size=self.kernel_size, dilation=self.dilation
459
+ )
460
+ self.drop_path = DinatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
461
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
462
+ self.intermediate = DinatIntermediate(config, dim)
463
+ self.output = DinatOutput(config, dim)
464
+ self.layer_scale_parameters = (
465
+ nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
466
+ if config.layer_scale_init_value > 0
467
+ else None
468
+ )
469
+
470
+ def maybe_pad(self, hidden_states, height, width):
471
+ window_size = self.window_size
472
+ pad_values = (0, 0, 0, 0, 0, 0)
473
+ if height < window_size or width < window_size:
474
+ pad_l = pad_t = 0
475
+ pad_r = max(0, window_size - width)
476
+ pad_b = max(0, window_size - height)
477
+ pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
478
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
479
+ return hidden_states, pad_values
480
+
481
+ def forward(
482
+ self,
483
+ hidden_states: torch.Tensor,
484
+ output_attentions: Optional[bool] = False,
485
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
486
+ batch_size, height, width, channels = hidden_states.size()
487
+ shortcut = hidden_states
488
+
489
+ hidden_states = self.layernorm_before(hidden_states)
490
+ # pad hidden_states if they are smaller than kernel size x dilation
491
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
492
+
493
+ _, height_pad, width_pad, _ = hidden_states.shape
494
+
495
+ attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)
496
+
497
+ attention_output = attention_outputs[0]
498
+
499
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
500
+ if was_padded:
501
+ attention_output = attention_output[:, :height, :width, :].contiguous()
502
+
503
+ if self.layer_scale_parameters is not None:
504
+ attention_output = self.layer_scale_parameters[0] * attention_output
505
+
506
+ hidden_states = shortcut + self.drop_path(attention_output)
507
+
508
+ layer_output = self.layernorm_after(hidden_states)
509
+ layer_output = self.output(self.intermediate(layer_output))
510
+
511
+ if self.layer_scale_parameters is not None:
512
+ layer_output = self.layer_scale_parameters[1] * layer_output
513
+
514
+ layer_output = hidden_states + self.drop_path(layer_output)
515
+
516
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
517
+ return layer_outputs
518
+
519
+
520
+ class DinatStage(nn.Module):
521
+ def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample):
522
+ super().__init__()
523
+ self.config = config
524
+ self.dim = dim
525
+ self.layers = nn.ModuleList(
526
+ [
527
+ DinatLayer(
528
+ config=config,
529
+ dim=dim,
530
+ num_heads=num_heads,
531
+ dilation=dilations[i],
532
+ drop_path_rate=drop_path_rate[i],
533
+ )
534
+ for i in range(depth)
535
+ ]
536
+ )
537
+
538
+ # patch merging layer
539
+ if downsample is not None:
540
+ self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
541
+ else:
542
+ self.downsample = None
543
+
544
+ self.pointing = False
545
+
546
+ # Copied from transformers.models.nat.modeling_nat.NatStage.forward
547
+ def forward(
548
+ self,
549
+ hidden_states: torch.Tensor,
550
+ output_attentions: Optional[bool] = False,
551
+ ) -> Tuple[torch.Tensor]:
552
+ _, height, width, _ = hidden_states.size()
553
+ for i, layer_module in enumerate(self.layers):
554
+ layer_outputs = layer_module(hidden_states, output_attentions)
555
+ hidden_states = layer_outputs[0]
556
+
557
+ hidden_states_before_downsampling = hidden_states
558
+ if self.downsample is not None:
559
+ hidden_states = self.downsample(hidden_states_before_downsampling)
560
+
561
+ stage_outputs = (hidden_states, hidden_states_before_downsampling)
562
+
563
+ if output_attentions:
564
+ stage_outputs += layer_outputs[1:]
565
+ return stage_outputs
566
+
567
+
568
+ class DinatEncoder(nn.Module):
569
+ def __init__(self, config):
570
+ super().__init__()
571
+ self.num_levels = len(config.depths)
572
+ self.config = config
573
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
574
+ self.levels = nn.ModuleList(
575
+ [
576
+ DinatStage(
577
+ config=config,
578
+ dim=int(config.embed_dim * 2**i_layer),
579
+ depth=config.depths[i_layer],
580
+ num_heads=config.num_heads[i_layer],
581
+ dilations=config.dilations[i_layer],
582
+ drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
583
+ downsample=DinatDownsampler if (i_layer < self.num_levels - 1) else None,
584
+ )
585
+ for i_layer in range(self.num_levels)
586
+ ]
587
+ )
588
+
589
+ # Copied from transformers.models.nat.modeling_nat.NatEncoder.forward with Nat->Dinat
590
+ def forward(
591
+ self,
592
+ hidden_states: torch.Tensor,
593
+ output_attentions: Optional[bool] = False,
594
+ output_hidden_states: Optional[bool] = False,
595
+ output_hidden_states_before_downsampling: Optional[bool] = False,
596
+ return_dict: Optional[bool] = True,
597
+ ) -> Union[Tuple, DinatEncoderOutput]:
598
+ all_hidden_states = () if output_hidden_states else None
599
+ all_reshaped_hidden_states = () if output_hidden_states else None
600
+ all_self_attentions = () if output_attentions else None
601
+
602
+ if output_hidden_states:
603
+ # rearrange b h w c -> b c h w
604
+ reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
605
+ all_hidden_states += (hidden_states,)
606
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
607
+
608
+ for i, layer_module in enumerate(self.levels):
609
+ layer_outputs = layer_module(hidden_states, output_attentions)
610
+
611
+ hidden_states = layer_outputs[0]
612
+ hidden_states_before_downsampling = layer_outputs[1]
613
+
614
+ if output_hidden_states and output_hidden_states_before_downsampling:
615
+ # rearrange b h w c -> b c h w
616
+ reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
617
+ all_hidden_states += (hidden_states_before_downsampling,)
618
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
619
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
620
+ # rearrange b h w c -> b c h w
621
+ reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
622
+ all_hidden_states += (hidden_states,)
623
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
624
+
625
+ if output_attentions:
626
+ all_self_attentions += layer_outputs[2:]
627
+
628
+ if not return_dict:
629
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
630
+
631
+ return DinatEncoderOutput(
632
+ last_hidden_state=hidden_states,
633
+ hidden_states=all_hidden_states,
634
+ attentions=all_self_attentions,
635
+ reshaped_hidden_states=all_reshaped_hidden_states,
636
+ )
637
+
638
+
639
+ class DinatPreTrainedModel(PreTrainedModel):
640
+ """
641
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
642
+ models.
643
+ """
644
+
645
+ config_class = DinatConfig
646
+ base_model_prefix = "dinat"
647
+ main_input_name = "pixel_values"
648
+
649
+ def _init_weights(self, module):
650
+ """Initialize the weights"""
651
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
652
+ # Slightly different from the TF version which uses truncated_normal for initialization
653
+ # cf https://github.com/pytorch/pytorch/pull/5617
654
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
655
+ if module.bias is not None:
656
+ module.bias.data.zero_()
657
+ elif isinstance(module, nn.LayerNorm):
658
+ module.bias.data.zero_()
659
+ module.weight.data.fill_(1.0)
660
+
661
+
662
+ DINAT_START_DOCSTRING = r"""
663
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
664
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
665
+ behavior.
666
+
667
+ Parameters:
668
+ config ([`DinatConfig`]): Model configuration class with all the parameters of the model.
669
+ Initializing with a config file does not load the weights associated with the model, only the
670
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
671
+ """
672
+
673
+ DINAT_INPUTS_DOCSTRING = r"""
674
+ Args:
675
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
676
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
677
+ for details.
678
+
679
+ output_attentions (`bool`, *optional*):
680
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
681
+ tensors for more detail.
682
+ output_hidden_states (`bool`, *optional*):
683
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
684
+ more detail.
685
+ return_dict (`bool`, *optional*):
686
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
687
+ """
688
+
689
+
690
+ @add_start_docstrings(
691
+ "The bare Dinat Model transformer outputting raw hidden-states without any specific head on top.",
692
+ DINAT_START_DOCSTRING,
693
+ )
694
+ # Copied from transformers.models.nat.modeling_nat.NatModel with Nat->Dinat, NAT->DINAT
695
+ class DinatModel(DinatPreTrainedModel):
696
+ def __init__(self, config, add_pooling_layer=True):
697
+ super().__init__(config)
698
+
699
+ requires_backends(self, ["natten"])
700
+
701
+ self.config = config
702
+ self.num_levels = len(config.depths)
703
+ self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))
704
+
705
+ self.embeddings = DinatEmbeddings(config)
706
+ self.encoder = DinatEncoder(config)
707
+
708
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
709
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
710
+
711
+ # Initialize weights and apply final processing
712
+ self.post_init()
713
+
714
+ def get_input_embeddings(self):
715
+ return self.embeddings.patch_embeddings
716
+
717
+ def _prune_heads(self, heads_to_prune):
718
+ """
719
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
720
+ class PreTrainedModel
721
+ """
722
+ for layer, heads in heads_to_prune.items():
723
+ self.encoder.layer[layer].attention.prune_heads(heads)
724
+
725
+ @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING)
726
+ @add_code_sample_docstrings(
727
+ checkpoint=_CHECKPOINT_FOR_DOC,
728
+ output_type=DinatModelOutput,
729
+ config_class=_CONFIG_FOR_DOC,
730
+ modality="vision",
731
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
732
+ )
733
+ def forward(
734
+ self,
735
+ pixel_values: Optional[torch.FloatTensor] = None,
736
+ output_attentions: Optional[bool] = None,
737
+ output_hidden_states: Optional[bool] = None,
738
+ return_dict: Optional[bool] = None,
739
+ ) -> Union[Tuple, DinatModelOutput]:
740
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
741
+ output_hidden_states = (
742
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
743
+ )
744
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
745
+
746
+ if pixel_values is None:
747
+ raise ValueError("You have to specify pixel_values")
748
+
749
+ embedding_output = self.embeddings(pixel_values)
750
+
751
+ encoder_outputs = self.encoder(
752
+ embedding_output,
753
+ output_attentions=output_attentions,
754
+ output_hidden_states=output_hidden_states,
755
+ return_dict=return_dict,
756
+ )
757
+
758
+ sequence_output = encoder_outputs[0]
759
+ sequence_output = self.layernorm(sequence_output)
760
+
761
+ pooled_output = None
762
+ if self.pooler is not None:
763
+ pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
764
+ pooled_output = torch.flatten(pooled_output, 1)
765
+
766
+ if not return_dict:
767
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
768
+
769
+ return output
770
+
771
+ return DinatModelOutput(
772
+ last_hidden_state=sequence_output,
773
+ pooler_output=pooled_output,
774
+ hidden_states=encoder_outputs.hidden_states,
775
+ attentions=encoder_outputs.attentions,
776
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
777
+ )
778
+
779
+
780
+ @add_start_docstrings(
781
+ """
782
+ Dinat Model transformer with an image classification head on top (a linear layer on top of the final hidden state
783
+ of the [CLS] token) e.g. for ImageNet.
784
+ """,
785
+ DINAT_START_DOCSTRING,
786
+ )
787
+ class DinatForImageClassification(DinatPreTrainedModel):
788
+ def __init__(self, config):
789
+ super().__init__(config)
790
+
791
+ requires_backends(self, ["natten"])
792
+
793
+ self.num_labels = config.num_labels
794
+ self.dinat = DinatModel(config)
795
+
796
+ # Classifier head
797
+ self.classifier = (
798
+ nn.Linear(self.dinat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
799
+ )
800
+
801
+ # Initialize weights and apply final processing
802
+ self.post_init()
803
+
804
+ @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING)
805
+ @add_code_sample_docstrings(
806
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
807
+ output_type=DinatImageClassifierOutput,
808
+ config_class=_CONFIG_FOR_DOC,
809
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
810
+ )
811
+ def forward(
812
+ self,
813
+ pixel_values: Optional[torch.FloatTensor] = None,
814
+ labels: Optional[torch.LongTensor] = None,
815
+ output_attentions: Optional[bool] = None,
816
+ output_hidden_states: Optional[bool] = None,
817
+ return_dict: Optional[bool] = None,
818
+ ) -> Union[Tuple, DinatImageClassifierOutput]:
819
+ r"""
820
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
821
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
822
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
823
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
824
+ """
825
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
826
+
827
+ outputs = self.dinat(
828
+ pixel_values,
829
+ output_attentions=output_attentions,
830
+ output_hidden_states=output_hidden_states,
831
+ return_dict=return_dict,
832
+ )
833
+
834
+ pooled_output = outputs[1]
835
+
836
+ logits = self.classifier(pooled_output)
837
+
838
+ loss = None
839
+ if labels is not None:
840
+ if self.config.problem_type is None:
841
+ if self.num_labels == 1:
842
+ self.config.problem_type = "regression"
843
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
844
+ self.config.problem_type = "single_label_classification"
845
+ else:
846
+ self.config.problem_type = "multi_label_classification"
847
+
848
+ if self.config.problem_type == "regression":
849
+ loss_fct = MSELoss()
850
+ if self.num_labels == 1:
851
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
852
+ else:
853
+ loss = loss_fct(logits, labels)
854
+ elif self.config.problem_type == "single_label_classification":
855
+ loss_fct = CrossEntropyLoss()
856
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
857
+ elif self.config.problem_type == "multi_label_classification":
858
+ loss_fct = BCEWithLogitsLoss()
859
+ loss = loss_fct(logits, labels)
860
+
861
+ if not return_dict:
862
+ output = (logits,) + outputs[2:]
863
+ return ((loss,) + output) if loss is not None else output
864
+
865
+ return DinatImageClassifierOutput(
866
+ loss=loss,
867
+ logits=logits,
868
+ hidden_states=outputs.hidden_states,
869
+ attentions=outputs.attentions,
870
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
871
+ )
872
+
873
+
874
+ @add_start_docstrings(
875
+ "NAT backbone, to be used with frameworks like DETR and MaskFormer.",
876
+ DINAT_START_DOCSTRING,
877
+ )
878
+ class DinatBackbone(DinatPreTrainedModel, BackboneMixin):
879
+ def __init__(self, config):
880
+ super().__init__(config)
881
+ super()._init_backbone(config)
882
+
883
+ requires_backends(self, ["natten"])
884
+
885
+ self.embeddings = DinatEmbeddings(config)
886
+ self.encoder = DinatEncoder(config)
887
+ self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]
888
+
889
+ # Add layer norms to hidden states of out_features
890
+ hidden_states_norms = {}
891
+ for stage, num_channels in zip(self._out_features, self.channels):
892
+ hidden_states_norms[stage] = nn.LayerNorm(num_channels)
893
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
894
+
895
+ # Initialize weights and apply final processing
896
+ self.post_init()
897
+
898
+ def get_input_embeddings(self):
899
+ return self.embeddings.patch_embeddings
900
+
901
+ @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING)
902
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
903
+ def forward(
904
+ self,
905
+ pixel_values: torch.Tensor,
906
+ output_hidden_states: Optional[bool] = None,
907
+ output_attentions: Optional[bool] = None,
908
+ return_dict: Optional[bool] = None,
909
+ ) -> BackboneOutput:
910
+ """
911
+ Returns:
912
+
913
+ Examples:
914
+
915
+ ```python
916
+ >>> from transformers import AutoImageProcessor, AutoBackbone
917
+ >>> import torch
918
+ >>> from PIL import Image
919
+ >>> import requests
920
+
921
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
922
+ >>> image = Image.open(requests.get(url, stream=True).raw)
923
+
924
+ >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
925
+ >>> model = AutoBackbone.from_pretrained(
926
+ ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
927
+ ... )
928
+
929
+ >>> inputs = processor(image, return_tensors="pt")
930
+
931
+ >>> outputs = model(**inputs)
932
+
933
+ >>> feature_maps = outputs.feature_maps
934
+ >>> list(feature_maps[-1].shape)
935
+ [1, 512, 7, 7]
936
+ ```"""
937
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
938
+ output_hidden_states = (
939
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
940
+ )
941
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
942
+
943
+ embedding_output = self.embeddings(pixel_values)
944
+
945
+ outputs = self.encoder(
946
+ embedding_output,
947
+ output_attentions=output_attentions,
948
+ output_hidden_states=True,
949
+ output_hidden_states_before_downsampling=True,
950
+ return_dict=True,
951
+ )
952
+
953
+ hidden_states = outputs.reshaped_hidden_states
954
+
955
+ feature_maps = ()
956
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
957
+ if stage in self.out_features:
958
+ batch_size, num_channels, height, width = hidden_state.shape
959
+ hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
960
+ hidden_state = hidden_state.view(batch_size, height * width, num_channels)
961
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
962
+ hidden_state = hidden_state.view(batch_size, height, width, num_channels)
963
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
964
+ feature_maps += (hidden_state,)
965
+
966
+ if not return_dict:
967
+ output = (feature_maps,)
968
+ if output_hidden_states:
969
+ output += (outputs.hidden_states,)
970
+ return output
971
+
972
+ return BackboneOutput(
973
+ feature_maps=feature_maps,
974
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
975
+ attentions=outputs.attentions,
976
+ )
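The feature-map loop above normalizes each requested stage in channels-last layout before restoring `(batch, channels, height, width)`; a minimal sketch of that round trip with dummy shapes (not the real model):

```python
import torch
from torch import nn

batch_size, num_channels, height, width = 1, 512, 7, 7
hidden_state = torch.randn(batch_size, num_channels, height, width)
norm = nn.LayerNorm(num_channels)

# NCHW -> NHWC -> (N, H*W, C) so LayerNorm normalizes over the channel dim,
# then invert the reshapes to recover an NCHW feature map.
x = hidden_state.permute(0, 2, 3, 1).contiguous()
x = x.view(batch_size, height * width, num_channels)
x = norm(x)
x = x.view(batch_size, height, width, num_channels)
feature_map = x.permute(0, 3, 1, 2).contiguous()

assert feature_map.shape == hidden_state.shape  # (1, 512, 7, 7)
```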
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__init__.py ADDED
@@ -0,0 +1,103 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertOnnxConfig"],
22
+ "tokenization_flaubert": ["FlaubertTokenizer"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_flaubert"] = [
32
+ "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "FlaubertForMultipleChoice",
34
+ "FlaubertForQuestionAnswering",
35
+ "FlaubertForQuestionAnsweringSimple",
36
+ "FlaubertForSequenceClassification",
37
+ "FlaubertForTokenClassification",
38
+ "FlaubertModel",
39
+ "FlaubertWithLMHeadModel",
40
+ "FlaubertPreTrainedModel",
41
+ ]
42
+
43
+ try:
44
+ if not is_tf_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_tf_flaubert"] = [
50
+ "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "TFFlaubertForMultipleChoice",
52
+ "TFFlaubertForQuestionAnsweringSimple",
53
+ "TFFlaubertForSequenceClassification",
54
+ "TFFlaubertForTokenClassification",
55
+ "TFFlaubertModel",
56
+ "TFFlaubertPreTrainedModel",
57
+ "TFFlaubertWithLMHeadModel",
58
+ ]
59
+
60
+
61
+ if TYPE_CHECKING:
62
+ from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertOnnxConfig
63
+ from .tokenization_flaubert import FlaubertTokenizer
64
+
65
+ try:
66
+ if not is_torch_available():
67
+ raise OptionalDependencyNotAvailable()
68
+ except OptionalDependencyNotAvailable:
69
+ pass
70
+ else:
71
+ from .modeling_flaubert import (
72
+ FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
73
+ FlaubertForMultipleChoice,
74
+ FlaubertForQuestionAnswering,
75
+ FlaubertForQuestionAnsweringSimple,
76
+ FlaubertForSequenceClassification,
77
+ FlaubertForTokenClassification,
78
+ FlaubertModel,
79
+ FlaubertPreTrainedModel,
80
+ FlaubertWithLMHeadModel,
81
+ )
82
+
83
+ try:
84
+ if not is_tf_available():
85
+ raise OptionalDependencyNotAvailable()
86
+ except OptionalDependencyNotAvailable:
87
+ pass
88
+ else:
89
+ from .modeling_tf_flaubert import (
90
+ TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
91
+ TFFlaubertForMultipleChoice,
92
+ TFFlaubertForQuestionAnsweringSimple,
93
+ TFFlaubertForSequenceClassification,
94
+ TFFlaubertForTokenClassification,
95
+ TFFlaubertModel,
96
+ TFFlaubertPreTrainedModel,
97
+ TFFlaubertWithLMHeadModel,
98
+ )
99
+
100
+ else:
101
+ import sys
102
+
103
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
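A small, hedged illustration of what the `_LazyModule` registration above does at runtime (assumes `transformers` and `torch` are installed): importing the package is cheap, and the heavy submodule is only imported on first attribute access.

```python
from transformers.models import flaubert

# The object registered in sys.modules is the lazy wrapper, not a plain module.
print(type(flaubert).__name__)  # _LazyModule

# First attribute access resolves FlaubertModel to modeling_flaubert and imports it (needs torch).
model_cls = flaubert.FlaubertModel
print(model_cls.__module__)     # transformers.models.flaubert.modeling_flaubert
```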
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.79 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/configuration_flaubert.cpython-310.pyc ADDED
Binary file (10.1 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_flaubert.cpython-310.pyc ADDED
Binary file (38.1 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_tf_flaubert.cpython-310.pyc ADDED
Binary file (38.5 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/tokenization_flaubert.cpython-310.pyc ADDED
Binary file (18.3 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/configuration_flaubert.py ADDED
@@ -0,0 +1,234 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flaubert configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class FlaubertConfig(PretrainedConfig):
31
+ """
32
+ This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is
33
+ used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.
34
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FlauBERT
35
+ [flaubert/flaubert_base_uncased](https://huggingface.co/flaubert/flaubert_base_uncased) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ pre_norm (`bool`, *optional*, defaults to `False`):
42
+ Whether to apply the layer normalization before or after the feed forward layer following the attention in
43
+ each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)
44
+ layerdrop (`float`, *optional*, defaults to 0.0):
45
+ Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with
46
+ Structured Dropout. ICLR 2020)
47
+ vocab_size (`int`, *optional*, defaults to 30145):
48
+ Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by
49
+ the `inputs_ids` passed when calling [`FlaubertModel`] or [`TFFlaubertModel`].
50
+ emb_dim (`int`, *optional*, defaults to 2048):
51
+ Dimensionality of the encoder layers and the pooler layer.
52
+ n_layers (`int`, *optional*, defaults to 12):
53
+ Number of hidden layers in the Transformer encoder.
54
+ n_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ dropout (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for the attention mechanism
60
+ gelu_activation (`bool`, *optional*, defaults to `True`):
61
+ Whether or not to use a *gelu* activation instead of *relu*.
62
+ sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
63
+ Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
64
+ causal (`bool`, *optional*, defaults to `False`):
65
+ Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in
66
+ order to only attend to the left-side context instead if a bidirectional context.
67
+ asm (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
69
+ layer.
70
+ n_langs (`int`, *optional*, defaults to 1):
71
+ The number of languages the model handles. Set to 1 for monolingual models.
72
+ use_lang_emb (`bool`, *optional*, defaults to `True`):
73
+ Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
74
+ models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
75
+ on how to use them.
76
+ max_position_embeddings (`int`, *optional*, defaults to 512):
77
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
78
+ just in case (e.g., 512 or 1024 or 2048).
79
+ embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
80
+ The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
81
+ init_std (`float`, *optional*, defaults to 0.02):
82
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
83
+ embedding matrices.
84
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
85
+ The epsilon used by the layer normalization layers.
86
+ bos_index (`int`, *optional*, defaults to 0):
87
+ The index of the beginning of sentence token in the vocabulary.
88
+ eos_index (`int`, *optional*, defaults to 1):
89
+ The index of the end of sentence token in the vocabulary.
90
+ pad_index (`int`, *optional*, defaults to 2):
91
+ The index of the padding token in the vocabulary.
92
+ unk_index (`int`, *optional*, defaults to 3):
93
+ The index of the unknown token in the vocabulary.
94
+ mask_index (`int`, *optional*, defaults to 5):
95
+ The index of the masking token in the vocabulary.
96
+ is_encoder (`bool`, *optional*, defaults to `True`):
97
+ Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
98
+ summary_type (`string`, *optional*, defaults to "first"):
99
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
100
+
101
+ Has to be one of the following options:
102
+
103
+ - `"last"`: Take the last token hidden state (like XLNet).
104
+ - `"first"`: Take the first token hidden state (like BERT).
105
+ - `"mean"`: Take the mean of all tokens hidden states.
106
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
107
+ - `"attn"`: Not implemented now, use multi-head attention.
108
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
109
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
110
+
111
+ Whether or not to add a projection after the vector extraction.
112
+ summary_activation (`str`, *optional*):
113
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
114
+
115
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
116
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
117
+ Used in the sequence classification and multiple choice models.
118
+
119
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
120
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
121
+ Used in the sequence classification and multiple choice models.
122
+
123
+ The dropout ratio to be used after the projection and activation.
124
+ start_n_top (`int`, *optional*, defaults to 5):
125
+ Used in the SQuAD evaluation script.
126
+ end_n_top (`int`, *optional*, defaults to 5):
127
+ Used in the SQuAD evaluation script.
128
+ mask_token_id (`int`, *optional*, defaults to 0):
129
+ Model agnostic parameter to identify masked tokens when generating text in an MLM context.
130
+ lang_id (`int`, *optional*, defaults to 0):
131
+ The ID of the language used by the model. This parameter is used when generating text in a given language.
132
+ """
133
+
134
+ model_type = "flaubert"
135
+ attribute_map = {
136
+ "hidden_size": "emb_dim",
137
+ "num_attention_heads": "n_heads",
138
+ "num_hidden_layers": "n_layers",
139
+ "n_words": "vocab_size", # For backward compatibility
140
+ }
141
+
142
+ def __init__(
143
+ self,
144
+ pre_norm=False,
145
+ layerdrop=0.0,
146
+ vocab_size=30145,
147
+ emb_dim=2048,
148
+ n_layers=12,
149
+ n_heads=16,
150
+ dropout=0.1,
151
+ attention_dropout=0.1,
152
+ gelu_activation=True,
153
+ sinusoidal_embeddings=False,
154
+ causal=False,
155
+ asm=False,
156
+ n_langs=1,
157
+ use_lang_emb=True,
158
+ max_position_embeddings=512,
159
+ embed_init_std=2048**-0.5,
160
+ layer_norm_eps=1e-12,
161
+ init_std=0.02,
162
+ bos_index=0,
163
+ eos_index=1,
164
+ pad_index=2,
165
+ unk_index=3,
166
+ mask_index=5,
167
+ is_encoder=True,
168
+ summary_type="first",
169
+ summary_use_proj=True,
170
+ summary_activation=None,
171
+ summary_proj_to_labels=True,
172
+ summary_first_dropout=0.1,
173
+ start_n_top=5,
174
+ end_n_top=5,
175
+ mask_token_id=0,
176
+ lang_id=0,
177
+ pad_token_id=2,
178
+ bos_token_id=0,
179
+ **kwargs,
180
+ ):
181
+ """Constructs FlaubertConfig."""
182
+ self.pre_norm = pre_norm
183
+ self.layerdrop = layerdrop
184
+ self.vocab_size = vocab_size
185
+ self.emb_dim = emb_dim
186
+ self.n_layers = n_layers
187
+ self.n_heads = n_heads
188
+ self.dropout = dropout
189
+ self.attention_dropout = attention_dropout
190
+ self.gelu_activation = gelu_activation
191
+ self.sinusoidal_embeddings = sinusoidal_embeddings
192
+ self.causal = causal
193
+ self.asm = asm
194
+ self.n_langs = n_langs
195
+ self.use_lang_emb = use_lang_emb
196
+ self.layer_norm_eps = layer_norm_eps
197
+ self.bos_index = bos_index
198
+ self.eos_index = eos_index
199
+ self.pad_index = pad_index
200
+ self.unk_index = unk_index
201
+ self.mask_index = mask_index
202
+ self.is_encoder = is_encoder
203
+ self.max_position_embeddings = max_position_embeddings
204
+ self.embed_init_std = embed_init_std
205
+ self.init_std = init_std
206
+ self.summary_type = summary_type
207
+ self.summary_use_proj = summary_use_proj
208
+ self.summary_activation = summary_activation
209
+ self.summary_proj_to_labels = summary_proj_to_labels
210
+ self.summary_first_dropout = summary_first_dropout
211
+ self.start_n_top = start_n_top
212
+ self.end_n_top = end_n_top
213
+ self.mask_token_id = mask_token_id
214
+ self.lang_id = lang_id
215
+
216
+ if "n_words" in kwargs:
217
+ self.n_words = kwargs["n_words"]
218
+
219
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
220
+
221
+
222
+ class FlaubertOnnxConfig(OnnxConfig):
223
+ @property
224
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
225
+ if self.task == "multiple-choice":
226
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
227
+ else:
228
+ dynamic_axis = {0: "batch", 1: "sequence"}
229
+ return OrderedDict(
230
+ [
231
+ ("input_ids", dynamic_axis),
232
+ ("attention_mask", dynamic_axis),
233
+ ]
234
+ )
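A brief usage sketch for this configuration file (assumes `transformers` is installed): the `attribute_map` entries alias the generic config names onto the XLM-style ones, and `FlaubertOnnxConfig.inputs` exposes the dynamic ONNX axes.

```python
from transformers import FlaubertConfig
from transformers.models.flaubert.configuration_flaubert import FlaubertOnnxConfig

config = FlaubertConfig()          # defaults from this file: emb_dim=2048, n_layers=12, n_heads=16
print(config.hidden_size)          # 2048 -> aliased to emb_dim via attribute_map
print(config.num_hidden_layers)    # 12   -> aliased to n_layers
print(config.num_attention_heads)  # 16   -> aliased to n_heads

onnx_config = FlaubertOnnxConfig(config, task="default")
print(dict(onnx_config.inputs))    # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {...}}
```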
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/modeling_flaubert.py ADDED
@@ -0,0 +1,1302 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Flaubert model, based on XLM."""
16
+
17
+ import itertools
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Dict, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import gelu
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead
37
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from .configuration_flaubert import FlaubertConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased"
52
+ _CONFIG_FOR_DOC = "FlaubertConfig"
53
+
54
+
55
+ from ..deprecated._archive_maps import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ # Copied from transformers.models.xlm.modeling_xlm.create_sinusoidal_embeddings
59
+ def create_sinusoidal_embeddings(n_pos, dim, out):
60
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
61
+ out.requires_grad = False
62
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
63
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
64
+ out.detach_()
65
+
66
+
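A quick sanity check of `create_sinusoidal_embeddings` defined above, filling a small `nn.Embedding` weight in place (toy sizes; the import path assumes this version of `transformers` is installed):

```python
import torch
from torch import nn
from transformers.models.flaubert.modeling_flaubert import create_sinusoidal_embeddings

n_pos, dim = 16, 8
position_embeddings = nn.Embedding(n_pos, dim)

# The helper writes sin/cos values into the weight and marks it non-trainable.
create_sinusoidal_embeddings(n_pos, dim, out=position_embeddings.weight)

print(position_embeddings.weight.requires_grad)  # False
print(position_embeddings.weight[0])             # position 0: alternating sin(0)=0 and cos(0)=1
print(position_embeddings.weight[1, :2])         # position 1, first pair: sin(1), cos(1)
```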
67
+ # Copied from transformers.models.xlm.modeling_xlm.get_masks
68
+ def get_masks(slen, lengths, causal, padding_mask=None):
69
+ """
70
+ Generate hidden states mask, and optionally an attention mask.
71
+ """
72
+ alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
73
+ if padding_mask is not None:
74
+ mask = padding_mask
75
+ else:
76
+ assert lengths.max().item() <= slen
77
+ mask = alen < lengths[:, None]
78
+
79
+ # attention mask is the same as mask, or triangular inferior attention (causal)
80
+ bs = lengths.size(0)
81
+ if causal:
82
+ attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
83
+ else:
84
+ attn_mask = mask
85
+
86
+ # sanity check
87
+ assert mask.size() == (bs, slen)
88
+ assert causal is False or attn_mask.size() == (bs, slen, slen)
89
+
90
+ return mask, attn_mask
91
+
92
+
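To make the mask shapes returned by `get_masks` concrete, a tiny example with a right-padded batch (illustrative values; import path assumes this version of `transformers`):

```python
import torch
from transformers.models.flaubert.modeling_flaubert import get_masks

slen = 5
lengths = torch.tensor([5, 3])  # two sequences padded to slen=5

# Non-causal: both returned masks are the (bs, slen) padding mask.
mask, attn_mask = get_masks(slen, lengths, causal=False)
print(mask.shape, attn_mask.shape)  # torch.Size([2, 5]) torch.Size([2, 5])

# Causal: the attention mask becomes (bs, slen, slen) and is lower-triangular;
# padding positions are still handled by `mask`.
mask, attn_mask = get_masks(slen, lengths, causal=True)
print(attn_mask.shape)              # torch.Size([2, 5, 5])
print(attn_mask[0].int())
```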
93
+ # Copied from transformers.models.xlm.modeling_xlm.MultiHeadAttention
94
+ class MultiHeadAttention(nn.Module):
95
+ NEW_ID = itertools.count()
96
+
97
+ def __init__(self, n_heads, dim, config):
98
+ super().__init__()
99
+ self.layer_id = next(MultiHeadAttention.NEW_ID)
100
+ self.dim = dim
101
+ self.n_heads = n_heads
102
+ self.dropout = config.attention_dropout
103
+ assert self.dim % self.n_heads == 0
104
+
105
+ self.q_lin = nn.Linear(dim, dim)
106
+ self.k_lin = nn.Linear(dim, dim)
107
+ self.v_lin = nn.Linear(dim, dim)
108
+ self.out_lin = nn.Linear(dim, dim)
109
+ self.pruned_heads = set()
110
+
111
+ def prune_heads(self, heads):
112
+ attention_head_size = self.dim // self.n_heads
113
+ if len(heads) == 0:
114
+ return
115
+ heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
116
+ # Prune linear layers
117
+ self.q_lin = prune_linear_layer(self.q_lin, index)
118
+ self.k_lin = prune_linear_layer(self.k_lin, index)
119
+ self.v_lin = prune_linear_layer(self.v_lin, index)
120
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
121
+ # Update hyper params
122
+ self.n_heads = self.n_heads - len(heads)
123
+ self.dim = attention_head_size * self.n_heads
124
+ self.pruned_heads = self.pruned_heads.union(heads)
125
+
126
+ def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):
127
+ """
128
+ Self-attention (if kv is None) or attention over source sentence (provided by kv).
129
+ """
130
+ # Input is (bs, qlen, dim)
131
+ # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
132
+ bs, qlen, dim = input.size()
133
+ if kv is None:
134
+ klen = qlen if cache is None else cache["slen"] + qlen
135
+ else:
136
+ klen = kv.size(1)
137
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
138
+ n_heads = self.n_heads
139
+ dim_per_head = self.dim // n_heads
140
+ mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
141
+
142
+ def shape(x):
143
+ """projection"""
144
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
145
+
146
+ def unshape(x):
147
+ """compute context"""
148
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
149
+
150
+ q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
151
+ if kv is None:
152
+ k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
153
+ v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
154
+ elif cache is None or self.layer_id not in cache:
155
+ k = v = kv
156
+ k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
157
+ v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
158
+
159
+ if cache is not None:
160
+ if self.layer_id in cache:
161
+ if kv is None:
162
+ k_, v_ = cache[self.layer_id]
163
+ k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
164
+ v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
165
+ else:
166
+ k, v = cache[self.layer_id]
167
+ cache[self.layer_id] = (k, v)
168
+
169
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
170
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
171
+ mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
172
+ scores.masked_fill_(mask, torch.finfo(scores.dtype).min) # (bs, n_heads, qlen, klen)
173
+
174
+ weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
175
+ weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
176
+
177
+ # Mask heads if we want to
178
+ if head_mask is not None:
179
+ weights = weights * head_mask
180
+
181
+ context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
182
+ context = unshape(context) # (bs, qlen, dim)
183
+
184
+ outputs = (self.out_lin(context),)
185
+ if output_attentions:
186
+ outputs = outputs + (weights,)
187
+ return outputs
188
+
189
+
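The `shape`/`unshape` closures inside `MultiHeadAttention.forward` are the usual head split and merge; a standalone sketch with toy sizes (not the module itself):

```python
import torch

bs, qlen, n_heads, dim_per_head = 2, 4, 8, 64
dim = n_heads * dim_per_head
x = torch.randn(bs, qlen, dim)

# shape(): (bs, qlen, dim) -> (bs, n_heads, qlen, dim_per_head)
heads = x.view(bs, -1, n_heads, dim_per_head).transpose(1, 2)
print(heads.shape)  # torch.Size([2, 8, 4, 64])

# unshape(): exact inverse, back to (bs, qlen, dim)
merged = heads.transpose(1, 2).contiguous().view(bs, -1, n_heads * dim_per_head)
print(torch.equal(merged, x))  # True
```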
190
+ # Copied from transformers.models.xlm.modeling_xlm.TransformerFFN
191
+ class TransformerFFN(nn.Module):
192
+ def __init__(self, in_dim, dim_hidden, out_dim, config):
193
+ super().__init__()
194
+ self.dropout = config.dropout
195
+ self.lin1 = nn.Linear(in_dim, dim_hidden)
196
+ self.lin2 = nn.Linear(dim_hidden, out_dim)
197
+ self.act = gelu if config.gelu_activation else nn.functional.relu
198
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
199
+ self.seq_len_dim = 1
200
+
201
+ def forward(self, input):
202
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
203
+
204
+ def ff_chunk(self, input):
205
+ x = self.lin1(input)
206
+ x = self.act(x)
207
+ x = self.lin2(x)
208
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
209
+ return x
210
+
211
+
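`TransformerFFN` routes its forward through `apply_chunking_to_forward`, which splits the sequence dimension into chunks and concatenates the chunked outputs; the result matches a single full pass, only peak activation memory changes. A hedged illustration with a toy feed-forward:

```python
import torch
from torch import nn
from transformers.pytorch_utils import apply_chunking_to_forward

lin1, lin2 = nn.Linear(16, 64), nn.Linear(64, 16)


def ff_chunk(x):
    return lin2(torch.relu(lin1(x)))


hidden = torch.randn(2, 10, 16)  # (batch, seq_len, dim)

full = apply_chunking_to_forward(ff_chunk, 0, 1, hidden)     # chunk_size=0: one full pass
chunked = apply_chunking_to_forward(ff_chunk, 2, 1, hidden)  # five chunks of length 2 along dim 1
print(torch.allclose(full, chunked, atol=1e-6))              # True
```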
212
+ FLAUBERT_START_DOCSTRING = r"""
213
+
214
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
215
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
216
+ etc.)
217
+
218
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
219
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
220
+ and behavior.
221
+
222
+ Parameters:
223
+ config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.
224
+ Initializing with a config file does not load the weights associated with the model, only the
225
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
226
+ """
227
+
228
+ FLAUBERT_INPUTS_DOCSTRING = r"""
229
+ Args:
230
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
231
+ Indices of input sequence tokens in the vocabulary.
232
+
233
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
234
+ [`PreTrainedTokenizer.__call__`] for details.
235
+
236
+ [What are input IDs?](../glossary#input-ids)
237
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
238
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
239
+
240
+ - 1 for tokens that are **not masked**,
241
+ - 0 for tokens that are **masked**.
242
+
243
+ [What are attention masks?](../glossary#attention-mask)
244
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
245
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
246
+ 1]`:
247
+
248
+ - 0 corresponds to a *sentence A* token,
249
+ - 1 corresponds to a *sentence B* token.
250
+
251
+ [What are token type IDs?](../glossary#token-type-ids)
252
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
253
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
254
+ config.max_position_embeddings - 1]`.
255
+
256
+ [What are position IDs?](../glossary#position-ids)
257
+ lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
258
+ Length of each sentence that can be used to avoid performing attention on padding token indices. You can
259
+ also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
260
+ `[0, ..., input_ids.size(-1)]`:
261
+ cache (`Dict[str, torch.FloatTensor]`, *optional*):
262
+ Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
263
+ attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
264
+ decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
265
+ hidden-states.
266
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
267
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
268
+
269
+ - 1 indicates the head is **not masked**,
270
+ - 0 indicates the head is **masked**.
271
+
272
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
273
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
274
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
275
+ model's internal embedding lookup matrix.
276
+ output_attentions (`bool`, *optional*):
277
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
278
+ tensors for more detail.
279
+ output_hidden_states (`bool`, *optional*):
280
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
281
+ more detail.
282
+ return_dict (`bool`, *optional*):
283
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
284
+ """
285
+
286
+
287
+ @add_start_docstrings(
288
+ "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
289
+ FLAUBERT_START_DOCSTRING,
290
+ )
291
+ # Copied from transformers.models.xlm.modeling_xlm.XLMPredLayer with XLM->Flaubert
292
+ class FlaubertPredLayer(nn.Module):
293
+ """
294
+ Prediction layer (cross_entropy or adaptive_softmax).
295
+ """
296
+
297
+ def __init__(self, config):
298
+ super().__init__()
299
+ self.asm = config.asm
300
+ self.n_words = config.n_words
301
+ self.pad_index = config.pad_index
302
+ dim = config.emb_dim
303
+
304
+ if config.asm is False:
305
+ self.proj = nn.Linear(dim, config.n_words, bias=True)
306
+ else:
307
+ self.proj = nn.AdaptiveLogSoftmaxWithLoss(
308
+ in_features=dim,
309
+ n_classes=config.n_words,
310
+ cutoffs=config.asm_cutoffs,
311
+ div_value=config.asm_div_value,
312
+ head_bias=True, # default is False
313
+ )
314
+
315
+ def forward(self, x, y=None):
316
+ """Compute the loss, and optionally the scores."""
317
+ outputs = ()
318
+ if self.asm is False:
319
+ scores = self.proj(x)
320
+ outputs = (scores,) + outputs
321
+ if y is not None:
322
+ loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean")
323
+ outputs = (loss,) + outputs
324
+ else:
325
+ scores = self.proj.log_prob(x)
326
+ outputs = (scores,) + outputs
327
+ if y is not None:
328
+ _, loss = self.proj(x, y)
329
+ outputs = (loss,) + outputs
330
+
331
+ return outputs
332
+
333
+
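For the `asm=True` branch above, the projection is PyTorch's `nn.AdaptiveLogSoftmaxWithLoss`; a standalone sketch (toy sizes, illustrative cutoffs) of how its two call paths line up with the scores/loss outputs of the layer:

```python
import torch
from torch import nn

dim, n_words = 32, 1000
x = torch.randn(8, dim)              # hidden states for 8 positions
y = torch.randint(0, n_words, (8,))  # target token ids

proj = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=dim, n_classes=n_words, cutoffs=[100, 500], div_value=4.0, head_bias=True
)

scores = proj.log_prob(x)  # (8, n_words) log-probabilities, used when no labels are given
_, loss = proj(x, y)       # forward returns (output, loss); the loss is what the layer reports
print(scores.shape, loss.item())
```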
334
+ # Copied from transformers.models.xlm.modeling_xlm.XLMPreTrainedModel with XLM->Flaubert
335
+ class FlaubertPreTrainedModel(PreTrainedModel):
336
+ """
337
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
338
+ models.
339
+ """
340
+
341
+ config_class = FlaubertConfig
342
+ load_tf_weights = None
343
+ base_model_prefix = "transformer"
344
+
345
+ def __init__(self, *inputs, **kwargs):
346
+ super().__init__(*inputs, **kwargs)
347
+
348
+ @property
349
+ def dummy_inputs(self):
350
+ inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
351
+ attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
352
+ if self.config.use_lang_emb and self.config.n_langs > 1:
353
+ langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
354
+ else:
355
+ langs_list = None
356
+ return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
357
+
358
+ def _init_weights(self, module):
359
+ """Initialize the weights."""
360
+ if isinstance(module, nn.Embedding):
361
+ if self.config is not None and self.config.embed_init_std is not None:
362
+ nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
363
+ if module.padding_idx is not None:
364
+ module.weight.data[module.padding_idx].zero_()
365
+ if isinstance(module, nn.Linear):
366
+ if self.config is not None and self.config.init_std is not None:
367
+ nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
368
+ if module.bias is not None:
369
+ nn.init.constant_(module.bias, 0.0)
370
+ if isinstance(module, nn.LayerNorm):
371
+ module.bias.data.zero_()
372
+ module.weight.data.fill_(1.0)
373
+ if isinstance(module, FlaubertModel) and self.config.sinusoidal_embeddings:
374
+ create_sinusoidal_embeddings(
375
+ self.config.max_position_embeddings, self.config.emb_dim, out=module.position_embeddings.weight
376
+ )
377
+
378
+
379
+ class FlaubertModel(FlaubertPreTrainedModel):
380
+ def __init__(self, config): # , dico, is_encoder, with_output):
381
+ super().__init__(config)
382
+
383
+ # encoder / decoder, output layer
384
+ self.is_encoder = config.is_encoder
385
+ self.is_decoder = not config.is_encoder
386
+ if self.is_decoder:
387
+ raise NotImplementedError("Currently Flaubert can only be used as an encoder")
388
+ # self.with_output = with_output
389
+ self.causal = config.causal
390
+
391
+ # dictionary / languages
392
+ self.n_langs = config.n_langs
393
+ self.use_lang_emb = config.use_lang_emb
394
+ self.n_words = config.n_words
395
+ self.eos_index = config.eos_index
396
+ self.pad_index = config.pad_index
397
+ # self.dico = dico
398
+ # self.id2lang = config.id2lang
399
+ # self.lang2id = config.lang2id
400
+ # assert len(self.dico) == self.n_words
401
+ # assert len(self.id2lang) == len(self.lang2id) == self.n_langs
402
+
403
+ # model parameters
404
+ self.dim = config.emb_dim # 512 by default
405
+ self.hidden_dim = self.dim * 4 # 2048 by default
406
+ self.n_heads = config.n_heads # 8 by default
407
+ self.n_layers = config.n_layers
408
+ self.dropout = config.dropout
409
+ self.attention_dropout = config.attention_dropout
410
+ assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
411
+
412
+ # embeddings
413
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
414
+ if config.n_langs > 1 and config.use_lang_emb:
415
+ self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
416
+ self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
417
+ self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
418
+
419
+ # transformer layers
420
+ self.attentions = nn.ModuleList()
421
+ self.layer_norm1 = nn.ModuleList()
422
+ self.ffns = nn.ModuleList()
423
+ self.layer_norm2 = nn.ModuleList()
424
+ # if self.is_decoder:
425
+ # self.layer_norm15 = nn.ModuleList()
426
+ # self.encoder_attn = nn.ModuleList()
427
+
428
+ for _ in range(self.n_layers):
429
+ self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
430
+ self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
431
+ # if self.is_decoder:
432
+ # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
433
+ # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
434
+ self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
435
+ self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
436
+
437
+ if hasattr(config, "pruned_heads"):
438
+ pruned_heads = config.pruned_heads.copy().items()
439
+ config.pruned_heads = {}
440
+ for layer, heads in pruned_heads:
441
+ if self.attentions[int(layer)].n_heads == config.n_heads:
442
+ self.prune_heads({int(layer): list(map(int, heads))})
443
+
444
+ # Initialize weights and apply final processing
445
+ self.post_init()
446
+
447
+ self.layerdrop = getattr(config, "layerdrop", 0.0)
448
+ self.pre_norm = getattr(config, "pre_norm", False)
449
+ self.register_buffer(
450
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
451
+ )
452
+
453
+ # Copied from transformers.models.xlm.modeling_xlm.XLMModel.get_input_embeddings
454
+ def get_input_embeddings(self):
455
+ return self.embeddings
456
+
457
+ # Copied from transformers.models.xlm.modeling_xlm.XLMModel.set_input_embeddings
458
+ def set_input_embeddings(self, new_embeddings):
459
+ self.embeddings = new_embeddings
460
+
461
+ # Copied from transformers.models.xlm.modeling_xlm.XLMModel._prune_heads
462
+ def _prune_heads(self, heads_to_prune):
463
+ """
464
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
465
+ class PreTrainedModel.
466
+ """
467
+ for layer, heads in heads_to_prune.items():
468
+ self.attentions[layer].prune_heads(heads)
469
+
470
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
471
+ @add_code_sample_docstrings(
472
+ checkpoint=_CHECKPOINT_FOR_DOC,
473
+ output_type=BaseModelOutput,
474
+ config_class=_CONFIG_FOR_DOC,
475
+ )
476
+ def forward(
477
+ self,
478
+ input_ids: Optional[torch.LongTensor] = None,
479
+ attention_mask: Optional[torch.FloatTensor] = None,
480
+ langs: Optional[torch.Tensor] = None,
481
+ token_type_ids: Optional[torch.LongTensor] = None,
482
+ position_ids: Optional[torch.LongTensor] = None,
483
+ lengths: Optional[torch.LongTensor] = None,
484
+ cache: Optional[Dict[str, torch.FloatTensor]] = None,
485
+ head_mask: Optional[torch.FloatTensor] = None,
486
+ inputs_embeds: Optional[torch.FloatTensor] = None,
487
+ output_attentions: Optional[bool] = None,
488
+ output_hidden_states: Optional[bool] = None,
489
+ return_dict: Optional[bool] = None,
490
+ ) -> Union[Tuple, BaseModelOutput]:
491
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
492
+ output_hidden_states = (
493
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
494
+ )
495
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
496
+
497
+ # removed: src_enc=None, src_len=None
498
+ if input_ids is not None:
499
+ bs, slen = input_ids.size()
500
+ else:
501
+ bs, slen = inputs_embeds.size()[:-1]
502
+
503
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
504
+
505
+ if lengths is None:
506
+ if input_ids is not None:
507
+ lengths = (input_ids != self.pad_index).sum(dim=1).long()
508
+ else:
509
+ lengths = torch.tensor([slen] * bs, device=device)
510
+ # mask = input_ids != self.pad_index
511
+
512
+ # check inputs
513
+ assert lengths.size(0) == bs
514
+ assert lengths.max().item() <= slen
515
+ # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
516
+ # assert (src_enc is None) == (src_len is None)
517
+ # if src_enc is not None:
518
+ # assert self.is_decoder
519
+ # assert src_enc.size(0) == bs
520
+
521
+ # generate masks
522
+ mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
523
+ # if self.is_decoder and src_enc is not None:
524
+ # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
525
+
526
+ # Setting the position-ids to the registered buffer in the constructor helps
527
+ # when tracing the model without passing position-ids, and solves
528
+ # issues similar to issue #5664
529
+ if position_ids is None:
530
+ if hasattr(self, "position_ids"):
531
+ position_ids = self.position_ids[:, :slen]
532
+ position_ids = position_ids.expand((bs, slen))
533
+ else:
534
+ position_ids = torch.arange(slen, dtype=torch.long, device=device)
535
+ position_ids = position_ids.unsqueeze(0).expand((bs, slen))
536
+ else:
537
+ assert position_ids.size() == (bs, slen) # (slen, bs)
538
+ # position_ids = position_ids.transpose(0, 1)
539
+
540
+ # langs
541
+ if langs is not None:
542
+ assert langs.size() == (bs, slen) # (slen, bs)
543
+ # langs = langs.transpose(0, 1)
544
+
545
+ # Prepare head mask if needed
546
+ head_mask = self.get_head_mask(head_mask, self.config.n_layers)
547
+
548
+ # do not recompute cached elements
549
+ if cache is not None and input_ids is not None:
550
+ _slen = slen - cache["slen"]
551
+ input_ids = input_ids[:, -_slen:]
552
+ position_ids = position_ids[:, -_slen:]
553
+ if langs is not None:
554
+ langs = langs[:, -_slen:]
555
+ mask = mask[:, -_slen:]
556
+ attn_mask = attn_mask[:, -_slen:]
557
+
558
+ # embeddings
559
+ if inputs_embeds is None:
560
+ inputs_embeds = self.embeddings(input_ids)
561
+
562
+ tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
563
+ if langs is not None and self.use_lang_emb and self.config.n_langs > 1:
564
+ tensor = tensor + self.lang_embeddings(langs)
565
+ if token_type_ids is not None:
566
+ tensor = tensor + self.embeddings(token_type_ids)
567
+ tensor = self.layer_norm_emb(tensor)
568
+ tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
569
+ tensor *= mask.unsqueeze(-1).to(tensor.dtype)
570
+
571
+ # transformer layers
572
+ hidden_states = () if output_hidden_states else None
573
+ attentions = () if output_attentions else None
574
+ for i in range(self.n_layers):
575
+ # LayerDrop
576
+ if self.training:
577
+ dropout_probability = torch.rand([])
578
+ if dropout_probability < self.layerdrop:
579
+ continue
580
+
581
+ if output_hidden_states:
582
+ hidden_states = hidden_states + (tensor,)
583
+
584
+ # self attention
585
+ if not self.pre_norm:
586
+ attn_outputs = self.attentions[i](
587
+ tensor,
588
+ attn_mask,
589
+ cache=cache,
590
+ head_mask=head_mask[i],
591
+ output_attentions=output_attentions,
592
+ )
593
+ attn = attn_outputs[0]
594
+ if output_attentions:
595
+ attentions = attentions + (attn_outputs[1],)
596
+ attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
597
+ tensor = tensor + attn
598
+ tensor = self.layer_norm1[i](tensor)
599
+ else:
600
+ tensor_normalized = self.layer_norm1[i](tensor)
601
+ attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache, head_mask=head_mask[i])
602
+ attn = attn_outputs[0]
603
+ if output_attentions:
604
+ attentions = attentions + (attn_outputs[1],)
605
+ attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
606
+ tensor = tensor + attn
607
+
608
+ # encoder attention (for decoder only)
609
+ # if self.is_decoder and src_enc is not None:
610
+ # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
611
+ # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
612
+ # tensor = tensor + attn
613
+ # tensor = self.layer_norm15[i](tensor)
614
+
615
+ # FFN
616
+ if not self.pre_norm:
617
+ tensor = tensor + self.ffns[i](tensor)
618
+ tensor = self.layer_norm2[i](tensor)
619
+ else:
620
+ tensor_normalized = self.layer_norm2[i](tensor)
621
+ tensor = tensor + self.ffns[i](tensor_normalized)
622
+
623
+ tensor *= mask.unsqueeze(-1).to(tensor.dtype)
624
+
625
+ # Add last hidden state
626
+ if output_hidden_states:
627
+ hidden_states = hidden_states + (tensor,)
628
+
629
+ # update cache length
630
+ if cache is not None:
631
+ cache["slen"] += tensor.size(1)
632
+
633
+ # move back sequence length to dimension 0
634
+ # tensor = tensor.transpose(0, 1)
635
+
636
+ if not return_dict:
637
+ return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
638
+
639
+ return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
640
+
641
+
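A minimal offline usage sketch for the model above, using a tiny randomly initialised config (not a trained checkpoint): with right padding, passing `attention_mask` and passing `lengths` produce the same padding handling.

```python
import torch
from transformers import FlaubertConfig, FlaubertModel

config = FlaubertConfig(vocab_size=50, emb_dim=32, n_layers=2, n_heads=4)
model = FlaubertModel(config).eval()  # eval() disables dropout so the two passes match

pad = config.pad_index  # 2 by default
input_ids = torch.tensor([[5, 6, 7, 8, 9], [5, 6, 7, pad, pad]])
attention_mask = (input_ids != pad).long()

with torch.no_grad():
    out_mask = model(input_ids, attention_mask=attention_mask)
    out_len = model(input_ids, lengths=attention_mask.sum(dim=1))

print(out_mask.last_hidden_state.shape)  # torch.Size([2, 5, 32])
print(torch.allclose(out_mask.last_hidden_state, out_len.last_hidden_state))  # True
```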
642
+ @add_start_docstrings(
643
+ """
644
+ The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
645
+ embeddings).
646
+ """,
647
+ FLAUBERT_START_DOCSTRING,
648
+ )
649
+ # Copied transformers.models.xlm.modeling_xlm.XLMWithLMHeadModel with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
650
+ class FlaubertWithLMHeadModel(FlaubertPreTrainedModel):
651
+ _tied_weights_keys = ["pred_layer.proj.weight"]
652
+
653
+ def __init__(self, config):
654
+ super().__init__(config)
655
+ self.transformer = FlaubertModel(config)
656
+ self.pred_layer = FlaubertPredLayer(config)
657
+
658
+ # Initialize weights and apply final processing
659
+ self.post_init()
660
+
661
+ def get_output_embeddings(self):
662
+ return self.pred_layer.proj
663
+
664
+ def set_output_embeddings(self, new_embeddings):
665
+ self.pred_layer.proj = new_embeddings
666
+
667
+ def prepare_inputs_for_generation(self, input_ids, **kwargs):
668
+ mask_token_id = self.config.mask_token_id
669
+ lang_id = self.config.lang_id
670
+
671
+ effective_batch_size = input_ids.shape[0]
672
+ mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)
673
+ input_ids = torch.cat([input_ids, mask_token], dim=1)
674
+ if lang_id is not None:
675
+ langs = torch.full_like(input_ids, lang_id)
676
+ else:
677
+ langs = None
678
+ return {"input_ids": input_ids, "langs": langs}
679
+
680
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
681
+ @add_code_sample_docstrings(
682
+ checkpoint=_CHECKPOINT_FOR_DOC,
683
+ output_type=MaskedLMOutput,
684
+ config_class=_CONFIG_FOR_DOC,
685
+ mask="<special1>",
686
+ )
687
+ def forward(
688
+ self,
689
+ input_ids: Optional[torch.Tensor] = None,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ langs: Optional[torch.Tensor] = None,
692
+ token_type_ids: Optional[torch.Tensor] = None,
693
+ position_ids: Optional[torch.Tensor] = None,
694
+ lengths: Optional[torch.Tensor] = None,
695
+ cache: Optional[Dict[str, torch.Tensor]] = None,
696
+ head_mask: Optional[torch.Tensor] = None,
697
+ inputs_embeds: Optional[torch.Tensor] = None,
698
+ labels: Optional[torch.Tensor] = None,
699
+ output_attentions: Optional[bool] = None,
700
+ output_hidden_states: Optional[bool] = None,
701
+ return_dict: Optional[bool] = None,
702
+ ) -> Union[Tuple, MaskedLMOutput]:
703
+ r"""
704
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
705
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
706
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
707
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
708
+ """
709
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
710
+
711
+ transformer_outputs = self.transformer(
712
+ input_ids,
713
+ attention_mask=attention_mask,
714
+ langs=langs,
715
+ token_type_ids=token_type_ids,
716
+ position_ids=position_ids,
717
+ lengths=lengths,
718
+ cache=cache,
719
+ head_mask=head_mask,
720
+ inputs_embeds=inputs_embeds,
721
+ output_attentions=output_attentions,
722
+ output_hidden_states=output_hidden_states,
723
+ return_dict=return_dict,
724
+ )
725
+
726
+ output = transformer_outputs[0]
727
+ outputs = self.pred_layer(output, labels) # (loss, logits) or (logits,) depending on if labels are provided.
728
+
729
+ if not return_dict:
730
+ return outputs + transformer_outputs[1:]
731
+
732
+ return MaskedLMOutput(
733
+ loss=outputs[0] if labels is not None else None,
734
+ logits=outputs[0] if labels is None else outputs[1],
735
+ hidden_states=transformer_outputs.hidden_states,
736
+ attentions=transformer_outputs.attentions,
737
+ )
738
+
739
+
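The `prepare_inputs_for_generation` hook above appends the mask token to every sequence and fills `langs` with a constant language id at each generation step; a small offline check with a tiny random config (illustrative hyper-parameters, not a trained checkpoint):

```python
import torch
from transformers import FlaubertConfig, FlaubertWithLMHeadModel

config = FlaubertConfig(vocab_size=100, emb_dim=32, n_layers=2, n_heads=4, mask_token_id=5, lang_id=0)
model = FlaubertWithLMHeadModel(config)

input_ids = torch.tensor([[7, 8, 9]])
prepared = model.prepare_inputs_for_generation(input_ids)

print(prepared["input_ids"])  # tensor([[7, 8, 9, 5]]) -> mask token appended
print(prepared["langs"])      # tensor([[0, 0, 0, 0]]) -> constant lang_id, same shape as the new input_ids
```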
740
+ @add_start_docstrings(
741
+ """
742
+ Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
743
+ e.g. for GLUE tasks.
744
+ """,
745
+ FLAUBERT_START_DOCSTRING,
746
+ )
747
+ # Copied transformers.models.xlm.modeling_xlm.XLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
748
+ class FlaubertForSequenceClassification(FlaubertPreTrainedModel):
749
+ def __init__(self, config):
750
+ super().__init__(config)
751
+ self.num_labels = config.num_labels
752
+ self.config = config
753
+
754
+ self.transformer = FlaubertModel(config)
755
+ self.sequence_summary = SequenceSummary(config)
756
+
757
+ # Initialize weights and apply final processing
758
+ self.post_init()
759
+
760
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
761
+ @add_code_sample_docstrings(
762
+ checkpoint=_CHECKPOINT_FOR_DOC,
763
+ output_type=SequenceClassifierOutput,
764
+ config_class=_CONFIG_FOR_DOC,
765
+ )
766
+ def forward(
767
+ self,
768
+ input_ids: Optional[torch.Tensor] = None,
769
+ attention_mask: Optional[torch.Tensor] = None,
770
+ langs: Optional[torch.Tensor] = None,
771
+ token_type_ids: Optional[torch.Tensor] = None,
772
+ position_ids: Optional[torch.Tensor] = None,
773
+ lengths: Optional[torch.Tensor] = None,
774
+ cache: Optional[Dict[str, torch.Tensor]] = None,
775
+ head_mask: Optional[torch.Tensor] = None,
776
+ inputs_embeds: Optional[torch.Tensor] = None,
777
+ labels: Optional[torch.Tensor] = None,
778
+ output_attentions: Optional[bool] = None,
779
+ output_hidden_states: Optional[bool] = None,
780
+ return_dict: Optional[bool] = None,
781
+ ) -> Union[Tuple, SequenceClassifierOutput]:
782
+ r"""
783
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
784
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
785
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
786
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
787
+ """
788
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
789
+
790
+ transformer_outputs = self.transformer(
791
+ input_ids,
792
+ attention_mask=attention_mask,
793
+ langs=langs,
794
+ token_type_ids=token_type_ids,
795
+ position_ids=position_ids,
796
+ lengths=lengths,
797
+ cache=cache,
798
+ head_mask=head_mask,
799
+ inputs_embeds=inputs_embeds,
800
+ output_attentions=output_attentions,
801
+ output_hidden_states=output_hidden_states,
802
+ return_dict=return_dict,
803
+ )
804
+
805
+ output = transformer_outputs[0]
806
+ logits = self.sequence_summary(output)
807
+
808
+ loss = None
809
+ if labels is not None:
810
+ if self.config.problem_type is None:
811
+ if self.num_labels == 1:
812
+ self.config.problem_type = "regression"
813
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
814
+ self.config.problem_type = "single_label_classification"
815
+ else:
816
+ self.config.problem_type = "multi_label_classification"
817
+
818
+ if self.config.problem_type == "regression":
819
+ loss_fct = MSELoss()
820
+ if self.num_labels == 1:
821
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
822
+ else:
823
+ loss = loss_fct(logits, labels)
824
+ elif self.config.problem_type == "single_label_classification":
825
+ loss_fct = CrossEntropyLoss()
826
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
827
+ elif self.config.problem_type == "multi_label_classification":
828
+ loss_fct = BCEWithLogitsLoss()
829
+ loss = loss_fct(logits, labels)
830
+
831
+ if not return_dict:
832
+ output = (logits,) + transformer_outputs[1:]
833
+ return ((loss,) + output) if loss is not None else output
834
+
835
+ return SequenceClassifierOutput(
836
+ loss=loss,
837
+ logits=logits,
838
+ hidden_states=transformer_outputs.hidden_states,
839
+ attentions=transformer_outputs.attentions,
840
+ )
841
+
842
+
843
+ @add_start_docstrings(
844
+ """
845
+ Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
846
+ Named-Entity-Recognition (NER) tasks.
847
+ """,
848
+ FLAUBERT_START_DOCSTRING,
849
+ )
850
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
851
+ class FlaubertForTokenClassification(FlaubertPreTrainedModel):
852
+ def __init__(self, config):
853
+ super().__init__(config)
854
+ self.num_labels = config.num_labels
855
+
856
+ self.transformer = FlaubertModel(config)
857
+ self.dropout = nn.Dropout(config.dropout)
858
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
859
+
860
+ # Initialize weights and apply final processing
861
+ self.post_init()
862
+
863
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
864
+ @add_code_sample_docstrings(
865
+ checkpoint=_CHECKPOINT_FOR_DOC,
866
+ output_type=TokenClassifierOutput,
867
+ config_class=_CONFIG_FOR_DOC,
868
+ )
869
+ def forward(
870
+ self,
871
+ input_ids: Optional[torch.Tensor] = None,
872
+ attention_mask: Optional[torch.Tensor] = None,
873
+ langs: Optional[torch.Tensor] = None,
874
+ token_type_ids: Optional[torch.Tensor] = None,
875
+ position_ids: Optional[torch.Tensor] = None,
876
+ lengths: Optional[torch.Tensor] = None,
877
+ cache: Optional[Dict[str, torch.Tensor]] = None,
878
+ head_mask: Optional[torch.Tensor] = None,
879
+ inputs_embeds: Optional[torch.Tensor] = None,
880
+ labels: Optional[torch.Tensor] = None,
881
+ output_attentions: Optional[bool] = None,
882
+ output_hidden_states: Optional[bool] = None,
883
+ return_dict: Optional[bool] = None,
884
+ ) -> Union[Tuple, TokenClassifierOutput]:
885
+ r"""
886
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
887
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
888
+ """
889
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
890
+
891
+ outputs = self.transformer(
892
+ input_ids,
893
+ attention_mask=attention_mask,
894
+ langs=langs,
895
+ token_type_ids=token_type_ids,
896
+ position_ids=position_ids,
897
+ lengths=lengths,
898
+ cache=cache,
899
+ head_mask=head_mask,
900
+ inputs_embeds=inputs_embeds,
901
+ output_attentions=output_attentions,
902
+ output_hidden_states=output_hidden_states,
903
+ return_dict=return_dict,
904
+ )
905
+
906
+ sequence_output = outputs[0]
907
+
908
+ sequence_output = self.dropout(sequence_output)
909
+ logits = self.classifier(sequence_output)
910
+
911
+ loss = None
912
+ if labels is not None:
913
+ loss_fct = CrossEntropyLoss()
914
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
915
+
916
+ if not return_dict:
917
+ output = (logits,) + outputs[1:]
918
+ return ((loss,) + output) if loss is not None else output
919
+
920
+ return TokenClassifierOutput(
921
+ loss=loss,
922
+ logits=logits,
923
+ hidden_states=outputs.hidden_states,
924
+ attentions=outputs.attentions,
925
+ )
926
+
927
+
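Editor's note (not part of the diff): for contrast with the sequence-level head, a hedged sketch of the token-classification forward pass above. The checkpoint and label count are assumptions; real NER labels would come from a tagged dataset rather than the zero placeholder used here.

```python
import torch
from transformers import AutoTokenizer, FlaubertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")  # assumed checkpoint
model = FlaubertForTokenClassification.from_pretrained("flaubert/flaubert_base_cased", num_labels=5)

inputs = tokenizer("Paris est la capitale de la France.", return_tensors="pt")
labels = torch.zeros_like(inputs["input_ids"])  # one label id per token, shape (batch, seq_len)
outputs = model(**inputs, labels=labels)
print(outputs.logits.shape)  # (1, seq_len, 5): one score per label for every token
```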
928
+ @add_start_docstrings(
929
+ """
930
+ Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
931
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
932
+ """,
933
+ FLAUBERT_START_DOCSTRING,
934
+ )
935
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
936
+ class FlaubertForQuestionAnsweringSimple(FlaubertPreTrainedModel):
937
+ def __init__(self, config):
938
+ super().__init__(config)
939
+
940
+ self.transformer = FlaubertModel(config)
941
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
942
+
943
+ # Initialize weights and apply final processing
944
+ self.post_init()
945
+
946
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
947
+ @add_code_sample_docstrings(
948
+ checkpoint=_CHECKPOINT_FOR_DOC,
949
+ output_type=QuestionAnsweringModelOutput,
950
+ config_class=_CONFIG_FOR_DOC,
951
+ )
952
+ def forward(
953
+ self,
954
+ input_ids: Optional[torch.Tensor] = None,
955
+ attention_mask: Optional[torch.Tensor] = None,
956
+ langs: Optional[torch.Tensor] = None,
957
+ token_type_ids: Optional[torch.Tensor] = None,
958
+ position_ids: Optional[torch.Tensor] = None,
959
+ lengths: Optional[torch.Tensor] = None,
960
+ cache: Optional[Dict[str, torch.Tensor]] = None,
961
+ head_mask: Optional[torch.Tensor] = None,
962
+ inputs_embeds: Optional[torch.Tensor] = None,
963
+ start_positions: Optional[torch.Tensor] = None,
964
+ end_positions: Optional[torch.Tensor] = None,
965
+ output_attentions: Optional[bool] = None,
966
+ output_hidden_states: Optional[bool] = None,
967
+ return_dict: Optional[bool] = None,
968
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
969
+ r"""
970
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
971
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
972
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
973
+ are not taken into account for computing the loss.
974
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
975
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
976
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
977
+ are not taken into account for computing the loss.
978
+ """
979
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
980
+
981
+ transformer_outputs = self.transformer(
982
+ input_ids,
983
+ attention_mask=attention_mask,
984
+ langs=langs,
985
+ token_type_ids=token_type_ids,
986
+ position_ids=position_ids,
987
+ lengths=lengths,
988
+ cache=cache,
989
+ head_mask=head_mask,
990
+ inputs_embeds=inputs_embeds,
991
+ output_attentions=output_attentions,
992
+ output_hidden_states=output_hidden_states,
993
+ return_dict=return_dict,
994
+ )
995
+
996
+ sequence_output = transformer_outputs[0]
997
+
998
+ logits = self.qa_outputs(sequence_output)
999
+ start_logits, end_logits = logits.split(1, dim=-1)
1000
+ start_logits = start_logits.squeeze(-1).contiguous()
1001
+ end_logits = end_logits.squeeze(-1).contiguous()
1002
+
1003
+ total_loss = None
1004
+ if start_positions is not None and end_positions is not None:
1005
+ # If we are on multi-GPU, split add a dimension
1006
+ if len(start_positions.size()) > 1:
1007
+ start_positions = start_positions.squeeze(-1)
1008
+ if len(end_positions.size()) > 1:
1009
+ end_positions = end_positions.squeeze(-1)
1010
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1011
+ ignored_index = start_logits.size(1)
1012
+ start_positions = start_positions.clamp(0, ignored_index)
1013
+ end_positions = end_positions.clamp(0, ignored_index)
1014
+
1015
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1016
+ start_loss = loss_fct(start_logits, start_positions)
1017
+ end_loss = loss_fct(end_logits, end_positions)
1018
+ total_loss = (start_loss + end_loss) / 2
1019
+
1020
+ if not return_dict:
1021
+ output = (start_logits, end_logits) + transformer_outputs[1:]
1022
+ return ((total_loss,) + output) if total_loss is not None else output
1023
+
1024
+ return QuestionAnsweringModelOutput(
1025
+ loss=total_loss,
1026
+ start_logits=start_logits,
1027
+ end_logits=end_logits,
1028
+ hidden_states=transformer_outputs.hidden_states,
1029
+ attentions=transformer_outputs.attentions,
1030
+ )
1031
+
1032
+
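Editor's note (not part of the diff): a short, hedged sketch of `FlaubertForQuestionAnsweringSimple`. The start/end positions are token indices into the question+context sequence; the values and checkpoint below are placeholders, and `forward` clamps out-of-range positions as shown above.

```python
import torch
from transformers import AutoTokenizer, FlaubertForQuestionAnsweringSimple

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertForQuestionAnsweringSimple.from_pretrained("flaubert/flaubert_base_cased")

inputs = tokenizer("Qui est le président ?", "Emmanuel Macron est le président.", return_tensors="pt")
# Placeholder gold span: indices are clamped to the sequence length inside forward().
outputs = model(**inputs, start_positions=torch.tensor([1]), end_positions=torch.tensor([3]))
print(outputs.loss.item(), outputs.start_logits.shape, outputs.end_logits.shape)
```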
1033
+ @add_start_docstrings(
1034
+ """
1035
+ Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like
1036
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1037
+ """,
1038
+ FLAUBERT_START_DOCSTRING,
1039
+ )
1040
+ @dataclass
1041
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput with XLM->Flaubert
1042
+ class FlaubertForQuestionAnsweringOutput(ModelOutput):
1043
+ """
1044
+ Base class for outputs of question answering models using a `SquadHead`.
1045
+
1046
+ Args:
1047
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
1048
+ Classification loss as the sum of start token, end token (and is_impossible if provided) classification
1049
+ losses.
1050
+ start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1051
+ Log probabilities for the top config.start_n_top start token possibilities (beam-search).
1052
+ start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1053
+ Indices for the top config.start_n_top start token possibilities (beam-search).
1054
+ end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1055
+ Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
1056
+ (beam-search).
1057
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1058
+ Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
1059
+ cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1060
+ Log probabilities for the `is_impossible` label of the answers.
1061
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1062
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
1063
+ shape `(batch_size, sequence_length, hidden_size)`.
1064
+
1065
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1066
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1067
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1068
+ sequence_length)`.
1069
+
1070
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1071
+ heads.
1072
+ """
1073
+
1074
+ loss: Optional[torch.FloatTensor] = None
1075
+ start_top_log_probs: Optional[torch.FloatTensor] = None
1076
+ start_top_index: Optional[torch.LongTensor] = None
1077
+ end_top_log_probs: Optional[torch.FloatTensor] = None
1078
+ end_top_index: Optional[torch.LongTensor] = None
1079
+ cls_logits: Optional[torch.FloatTensor] = None
1080
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
1081
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
1082
+
1083
+
1084
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnswering with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1085
+ class FlaubertForQuestionAnswering(FlaubertPreTrainedModel):
1086
+ def __init__(self, config):
1087
+ super().__init__(config)
1088
+
1089
+ self.transformer = FlaubertModel(config)
1090
+ self.qa_outputs = SQuADHead(config)
1091
+
1092
+ # Initialize weights and apply final processing
1093
+ self.post_init()
1094
+
1095
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1096
+ @replace_return_docstrings(output_type=FlaubertForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
1097
+ def forward(
1098
+ self,
1099
+ input_ids: Optional[torch.Tensor] = None,
1100
+ attention_mask: Optional[torch.Tensor] = None,
1101
+ langs: Optional[torch.Tensor] = None,
1102
+ token_type_ids: Optional[torch.Tensor] = None,
1103
+ position_ids: Optional[torch.Tensor] = None,
1104
+ lengths: Optional[torch.Tensor] = None,
1105
+ cache: Optional[Dict[str, torch.Tensor]] = None,
1106
+ head_mask: Optional[torch.Tensor] = None,
1107
+ inputs_embeds: Optional[torch.Tensor] = None,
1108
+ start_positions: Optional[torch.Tensor] = None,
1109
+ end_positions: Optional[torch.Tensor] = None,
1110
+ is_impossible: Optional[torch.Tensor] = None,
1111
+ cls_index: Optional[torch.Tensor] = None,
1112
+ p_mask: Optional[torch.Tensor] = None,
1113
+ output_attentions: Optional[bool] = None,
1114
+ output_hidden_states: Optional[bool] = None,
1115
+ return_dict: Optional[bool] = None,
1116
+ ) -> Union[Tuple, FlaubertForQuestionAnsweringOutput]:
1117
+ r"""
1118
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1119
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1120
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1121
+ are not taken into account for computing the loss.
1122
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1123
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1124
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1125
+ are not taken into account for computing the loss.
1126
+ is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1127
+ Labels whether a question has an answer or no answer (SQuAD 2.0)
1128
+ cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1129
+ Labels for position (index) of the classification token to use as input for computing plausibility of the
1130
+ answer.
1131
+ p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1132
+ Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
1133
+ masked. 0.0 means the token is not masked.
1134
+
1135
+ Returns:
1136
+
1137
+ Example:
1138
+
1139
+ ```python
1140
+ >>> from transformers import XLMTokenizer, XLMForQuestionAnswering
1141
+ >>> import torch
1142
+
1143
+ >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
1144
+ >>> model = XLMForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
1145
+
1146
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
1147
+ ... 0
1148
+ ... ) # Batch size 1
1149
+ >>> start_positions = torch.tensor([1])
1150
+ >>> end_positions = torch.tensor([3])
1151
+
1152
+ >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
1153
+ >>> loss = outputs.loss
1154
+ ```"""
1155
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1156
+
1157
+ transformer_outputs = self.transformer(
1158
+ input_ids,
1159
+ attention_mask=attention_mask,
1160
+ langs=langs,
1161
+ token_type_ids=token_type_ids,
1162
+ position_ids=position_ids,
1163
+ lengths=lengths,
1164
+ cache=cache,
1165
+ head_mask=head_mask,
1166
+ inputs_embeds=inputs_embeds,
1167
+ output_attentions=output_attentions,
1168
+ output_hidden_states=output_hidden_states,
1169
+ return_dict=return_dict,
1170
+ )
1171
+
1172
+ output = transformer_outputs[0]
1173
+
1174
+ outputs = self.qa_outputs(
1175
+ output,
1176
+ start_positions=start_positions,
1177
+ end_positions=end_positions,
1178
+ cls_index=cls_index,
1179
+ is_impossible=is_impossible,
1180
+ p_mask=p_mask,
1181
+ return_dict=return_dict,
1182
+ )
1183
+
1184
+ if not return_dict:
1185
+ return outputs + transformer_outputs[1:]
1186
+
1187
+ return FlaubertForQuestionAnsweringOutput(
1188
+ loss=outputs.loss,
1189
+ start_top_log_probs=outputs.start_top_log_probs,
1190
+ start_top_index=outputs.start_top_index,
1191
+ end_top_log_probs=outputs.end_top_log_probs,
1192
+ end_top_index=outputs.end_top_index,
1193
+ cls_logits=outputs.cls_logits,
1194
+ hidden_states=transformer_outputs.hidden_states,
1195
+ attentions=transformer_outputs.attentions,
1196
+ )
1197
+
1198
+
1199
+ @add_start_docstrings(
1200
+ """
1201
+ Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1202
+ softmax) e.g. for RocStories/SWAG tasks.
1203
+ """,
1204
+ FLAUBERT_START_DOCSTRING,
1205
+ )
1206
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1207
+ class FlaubertForMultipleChoice(FlaubertPreTrainedModel):
1208
+ def __init__(self, config, *inputs, **kwargs):
1209
+ super().__init__(config, *inputs, **kwargs)
1210
+
1211
+ self.transformer = FlaubertModel(config)
1212
+ self.sequence_summary = SequenceSummary(config)
1213
+ self.logits_proj = nn.Linear(config.num_labels, 1)
1214
+
1215
+ # Initialize weights and apply final processing
1216
+ self.post_init()
1217
+
1218
+ @add_start_docstrings_to_model_forward(
1219
+ FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1220
+ )
1221
+ @add_code_sample_docstrings(
1222
+ checkpoint=_CHECKPOINT_FOR_DOC,
1223
+ output_type=MultipleChoiceModelOutput,
1224
+ config_class=_CONFIG_FOR_DOC,
1225
+ )
1226
+ def forward(
1227
+ self,
1228
+ input_ids: Optional[torch.Tensor] = None,
1229
+ attention_mask: Optional[torch.Tensor] = None,
1230
+ langs: Optional[torch.Tensor] = None,
1231
+ token_type_ids: Optional[torch.Tensor] = None,
1232
+ position_ids: Optional[torch.Tensor] = None,
1233
+ lengths: Optional[torch.Tensor] = None,
1234
+ cache: Optional[Dict[str, torch.Tensor]] = None,
1235
+ head_mask: Optional[torch.Tensor] = None,
1236
+ inputs_embeds: Optional[torch.Tensor] = None,
1237
+ labels: Optional[torch.Tensor] = None,
1238
+ output_attentions: Optional[bool] = None,
1239
+ output_hidden_states: Optional[bool] = None,
1240
+ return_dict: Optional[bool] = None,
1241
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1242
+ r"""
1243
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1244
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1245
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1246
+ `input_ids` above)
1247
+ """
1248
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1249
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1250
+
1251
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1252
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1253
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1254
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1255
+ langs = langs.view(-1, langs.size(-1)) if langs is not None else None
1256
+ inputs_embeds = (
1257
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1258
+ if inputs_embeds is not None
1259
+ else None
1260
+ )
1261
+
1262
+ if lengths is not None:
1263
+ logger.warning(
1264
+ "The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the "
1265
+ "attention mask instead."
1266
+ )
1267
+ lengths = None
1268
+
1269
+ transformer_outputs = self.transformer(
1270
+ input_ids=input_ids,
1271
+ attention_mask=attention_mask,
1272
+ langs=langs,
1273
+ token_type_ids=token_type_ids,
1274
+ position_ids=position_ids,
1275
+ lengths=lengths,
1276
+ cache=cache,
1277
+ head_mask=head_mask,
1278
+ inputs_embeds=inputs_embeds,
1279
+ output_attentions=output_attentions,
1280
+ output_hidden_states=output_hidden_states,
1281
+ return_dict=return_dict,
1282
+ )
1283
+ output = transformer_outputs[0]
1284
+ logits = self.sequence_summary(output)
1285
+ logits = self.logits_proj(logits)
1286
+ reshaped_logits = logits.view(-1, num_choices)
1287
+
1288
+ loss = None
1289
+ if labels is not None:
1290
+ loss_fct = CrossEntropyLoss()
1291
+ loss = loss_fct(reshaped_logits, labels)
1292
+
1293
+ if not return_dict:
1294
+ output = (reshaped_logits,) + transformer_outputs[1:]
1295
+ return ((loss,) + output) if loss is not None else output
1296
+
1297
+ return MultipleChoiceModelOutput(
1298
+ loss=loss,
1299
+ logits=reshaped_logits,
1300
+ hidden_states=transformer_outputs.hidden_states,
1301
+ attentions=transformer_outputs.attentions,
1302
+ )
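Editor's note (not part of the diff): the multiple-choice head above flattens a `(batch_size, num_choices, seq_len)` batch before calling the transformer, then reshapes the projected logits back to `(batch_size, num_choices)`. A hedged sketch of that input layout follows; the checkpoint, prompt, and choices are assumptions.

```python
import torch
from transformers import AutoTokenizer, FlaubertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertForMultipleChoice.from_pretrained("flaubert/flaubert_base_cased")

prompt = "La tour Eiffel se trouve à"
choices = ["Paris.", "Lyon."]
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
# Add the num_choices dimension expected by forward(): (batch_size=1, num_choices=2, seq_len).
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}
outputs = model(**inputs, labels=torch.tensor([0]))  # label = index of the correct choice
print(outputs.logits.shape)  # torch.Size([1, 2])
```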
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/modeling_tf_flaubert.py ADDED
@@ -0,0 +1,1337 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ TF 2.0 Flaubert model.
17
+ """
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import itertools
23
+ import random
24
+ import warnings
25
+ from dataclasses import dataclass
26
+ from typing import Dict, Optional, Tuple, Union
27
+
28
+ import numpy as np
29
+ import tensorflow as tf
30
+
31
+ from ...activations_tf import get_tf_activation
32
+ from ...modeling_tf_outputs import (
33
+ TFBaseModelOutput,
34
+ TFMultipleChoiceModelOutput,
35
+ TFQuestionAnsweringModelOutput,
36
+ TFSequenceClassifierOutput,
37
+ TFTokenClassifierOutput,
38
+ )
39
+ from ...modeling_tf_utils import (
40
+ TFModelInputType,
41
+ TFMultipleChoiceLoss,
42
+ TFPreTrainedModel,
43
+ TFQuestionAnsweringLoss,
44
+ TFSequenceClassificationLoss,
45
+ TFSequenceSummary,
46
+ TFSharedEmbeddings,
47
+ TFTokenClassificationLoss,
48
+ get_initializer,
49
+ keras,
50
+ keras_serializable,
51
+ unpack_inputs,
52
+ )
53
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
54
+ from ...utils import (
55
+ MULTIPLE_CHOICE_DUMMY_INPUTS,
56
+ ModelOutput,
57
+ add_code_sample_docstrings,
58
+ add_start_docstrings,
59
+ add_start_docstrings_to_model_forward,
60
+ logging,
61
+ )
62
+ from .configuration_flaubert import FlaubertConfig
63
+
64
+
65
+ logger = logging.get_logger(__name__)
66
+
67
+ _CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased"
68
+ _CONFIG_FOR_DOC = "FlaubertConfig"
69
+
70
+
71
+ from ..deprecated._archive_maps import TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
72
+
73
+
74
+ FLAUBERT_START_DOCSTRING = r"""
75
+
76
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
77
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
78
+ etc.)
79
+
80
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
81
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
82
+ behavior.
83
+
84
+ <Tip>
85
+
86
+ TensorFlow models and layers in `transformers` accept two formats as input:
87
+
88
+ - having all inputs as keyword arguments (like PyTorch models), or
89
+ - having all inputs as a list, tuple or dict in the first positional argument.
90
+
91
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
92
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
93
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
94
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
95
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
96
+ positional argument:
97
+
98
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
99
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
100
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
101
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
102
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
103
+
104
+ Note that when creating models and layers with
105
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
106
+ about any of this, as you can just pass inputs like you would to any other Python function!
107
+
108
+ </Tip>
109
+
110
+ Parameters:
111
+ config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.
112
+ Initializing with a config file does not load the weights associated with the model, only the
113
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
114
+ """
115
+
116
+ FLAUBERT_INPUTS_DOCSTRING = r"""
117
+ Args:
118
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
119
+ Indices of input sequence tokens in the vocabulary.
120
+
121
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
122
+ [`PreTrainedTokenizer.encode`] for details.
123
+
124
+ [What are input IDs?](../glossary#input-ids)
125
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
126
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
127
+
128
+ - `1` for tokens that are **not masked**,
129
+ - `0` for tokens that are **masked**.
130
+
131
+ [What are attention masks?](../glossary#attention-mask)
132
+ langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
133
+ A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
134
+ languages ids which can be obtained from the language names by using two conversion mappings provided in
135
+ the configuration of the model (only provided for multilingual models). More precisely, the *language name
136
+ to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
137
+ *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
138
+
139
+ See usage examples detailed in the [multilingual documentation](../multilingual).
140
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
141
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
142
+ 1]`:
143
+
144
+ - `0` corresponds to a *sentence A* token,
145
+ - `1` corresponds to a *sentence B* token.
146
+
147
+ [What are token type IDs?](../glossary#token-type-ids)
148
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
149
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
150
+ config.max_position_embeddings - 1]`.
151
+
152
+ [What are position IDs?](../glossary#position-ids)
153
+ lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):
154
+ Length of each sentence that can be used to avoid performing attention on padding token indices. You can
155
+ also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
156
+ `[0, ..., input_ids.size(-1)]`:
157
+ cache (`Dict[str, tf.Tensor]`, *optional*):
158
+ Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the
159
+ attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
160
+ decoding.
161
+
162
+ The dictionary object will be modified in-place during the forward pass to add newly computed
163
+ hidden-states.
164
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
165
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
166
+
167
+ - `1` indicates the head is **not masked**,
168
+ - `0` indicates the head is **masked**.
169
+
170
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
171
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
172
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
173
+ model's internal embedding lookup matrix.
174
+ output_attentions (`bool`, *optional*):
175
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
176
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
177
+ config will be used instead.
178
+ output_hidden_states (`bool`, *optional*):
179
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
180
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
181
+ used instead.
182
+ return_dict (`bool`, *optional*):
183
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
184
+ eager mode, in graph mode the value will always be set to True.
185
+ training (`bool`, *optional*, defaults to `False`):
186
+ Whether or not to use the model in training mode (some modules like dropout modules have different
187
+ behaviors between training and evaluation).
188
+ """
189
+
190
+
191
+ def get_masks(slen, lengths, causal, padding_mask=None):
192
+ """
193
+ Generate hidden states mask, and optionally an attention mask.
194
+ """
195
+ bs = shape_list(lengths)[0]
196
+ if padding_mask is not None:
197
+ mask = padding_mask
198
+ else:
199
+ # assert lengths.max().item() <= slen
200
+ alen = tf.range(slen, dtype=lengths.dtype)
201
+ mask = alen < tf.expand_dims(lengths, axis=1)
202
+
203
+ # attention mask is the same as mask, or a lower-triangular (causal) attention mask
204
+ if causal:
205
+ attn_mask = tf.less_equal(
206
+ tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))
207
+ )
208
+ else:
209
+ attn_mask = mask
210
+
211
+ # sanity check
212
+ # assert shape_list(mask) == [bs, slen]
213
+ tf.debugging.assert_equal(shape_list(mask), [bs, slen])
214
+ if causal:
215
+ tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])
216
+
217
+ return mask, attn_mask
218
+
219
+
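Editor's note (not part of the diff): a tiny sketch of what `get_masks` returns for two sequences of lengths 3 and 5 with `slen=5` in the non-causal case. Importing the helper directly is an assumption for illustration; it is an internal utility rather than public API.

```python
import tensorflow as tf
from transformers.models.flaubert.modeling_tf_flaubert import get_masks  # internal helper

lengths = tf.constant([3, 5])
mask, attn_mask = get_masks(slen=5, lengths=lengths, causal=False)
print(mask.numpy())
# [[ True  True  True False False]
#  [ True  True  True  True  True]]
# With causal=False, attn_mask is simply the same padding mask.
```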
220
+ class TFFlaubertPreTrainedModel(TFPreTrainedModel):
221
+ """
222
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
223
+ models.
224
+ """
225
+
226
+ config_class = FlaubertConfig
227
+ base_model_prefix = "transformer"
228
+
229
+ @property
230
+ def dummy_inputs(self):
231
+ # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed
232
+ inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32)
233
+ attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)
234
+ if self.config.use_lang_emb and self.config.n_langs > 1:
235
+ return {
236
+ "input_ids": inputs_list,
237
+ "attention_mask": attns_list,
238
+ "langs": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32),
239
+ }
240
+ else:
241
+ return {"input_ids": inputs_list, "attention_mask": attns_list}
242
+
243
+
244
+ @add_start_docstrings(
245
+ "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
246
+ FLAUBERT_START_DOCSTRING,
247
+ )
248
+ class TFFlaubertModel(TFFlaubertPreTrainedModel):
249
+ def __init__(self, config, *inputs, **kwargs):
250
+ super().__init__(config, *inputs, **kwargs)
251
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
252
+
253
+ @unpack_inputs
254
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
255
+ @add_code_sample_docstrings(
256
+ checkpoint=_CHECKPOINT_FOR_DOC,
257
+ output_type=TFBaseModelOutput,
258
+ config_class=_CONFIG_FOR_DOC,
259
+ )
260
+ def call(
261
+ self,
262
+ input_ids: np.ndarray | tf.Tensor | None = None,
263
+ attention_mask: np.ndarray | tf.Tensor | None = None,
264
+ langs: np.ndarray | tf.Tensor | None = None,
265
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
266
+ position_ids: np.ndarray | tf.Tensor | None = None,
267
+ lengths: np.ndarray | tf.Tensor | None = None,
268
+ cache: Optional[Dict[str, tf.Tensor]] = None,
269
+ head_mask: np.ndarray | tf.Tensor | None = None,
270
+ inputs_embeds: tf.Tensor | None = None,
271
+ output_attentions: Optional[bool] = None,
272
+ output_hidden_states: Optional[bool] = None,
273
+ return_dict: Optional[bool] = None,
274
+ training: Optional[bool] = False,
275
+ ) -> Union[Tuple, TFBaseModelOutput]:
276
+ outputs = self.transformer(
277
+ input_ids=input_ids,
278
+ attention_mask=attention_mask,
279
+ langs=langs,
280
+ token_type_ids=token_type_ids,
281
+ position_ids=position_ids,
282
+ lengths=lengths,
283
+ cache=cache,
284
+ head_mask=head_mask,
285
+ inputs_embeds=inputs_embeds,
286
+ output_attentions=output_attentions,
287
+ output_hidden_states=output_hidden_states,
288
+ return_dict=return_dict,
289
+ training=training,
290
+ )
291
+
292
+ return outputs
293
+
294
+ def build(self, input_shape=None):
295
+ if self.built:
296
+ return
297
+ self.built = True
298
+ if getattr(self, "transformer", None) is not None:
299
+ with tf.name_scope(self.transformer.name):
300
+ self.transformer.build(None)
301
+
302
+
303
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert
304
+ class TFFlaubertMultiHeadAttention(keras.layers.Layer):
305
+ NEW_ID = itertools.count()
306
+
307
+ def __init__(self, n_heads, dim, config, **kwargs):
308
+ super().__init__(**kwargs)
309
+ self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID)
310
+ self.dim = dim
311
+ self.n_heads = n_heads
312
+ self.output_attentions = config.output_attentions
313
+ assert self.dim % self.n_heads == 0
314
+
315
+ self.q_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin")
316
+ self.k_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin")
317
+ self.v_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin")
318
+ self.out_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin")
319
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
320
+ self.pruned_heads = set()
321
+ self.dim = dim
322
+
323
+ def prune_heads(self, heads):
324
+ raise NotImplementedError
325
+
326
+ def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):
327
+ """
328
+ Self-attention (if kv is None) or attention over source sentence (provided by kv).
329
+ """
330
+ # Input is (bs, qlen, dim)
331
+ # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
332
+ bs, qlen, dim = shape_list(input)
333
+
334
+ if kv is None:
335
+ klen = qlen if cache is None else cache["slen"] + qlen
336
+ else:
337
+ klen = shape_list(kv)[1]
338
+
339
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
340
+ dim_per_head = self.dim // self.n_heads
341
+ mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)
342
+
343
+ def shape(x):
344
+ """projection"""
345
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
346
+
347
+ def unshape(x):
348
+ """compute context"""
349
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
350
+
351
+ q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
352
+
353
+ if kv is None:
354
+ k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
355
+ v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
356
+ elif cache is None or self.layer_id not in cache:
357
+ k = v = kv
358
+ k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
359
+ v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
360
+
361
+ if cache is not None:
362
+ if self.layer_id in cache:
363
+ if kv is None:
364
+ k_, v_ = cache[self.layer_id]
365
+ k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)
366
+ v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)
367
+ else:
368
+ k, v = cache[self.layer_id]
369
+
370
+ cache[self.layer_id] = (k, v)
371
+
372
+ f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype)
373
+ q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head)
374
+ k = tf.cast(k, dtype=q.dtype)
375
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)
376
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
377
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
378
+ mask = tf.cast(mask, dtype=scores.dtype)
379
+ scores = scores - 1e30 * (1.0 - mask)
380
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
381
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
382
+
383
+ # Mask heads if we want to
384
+ if head_mask is not None:
385
+ weights = weights * head_mask
386
+
387
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
388
+ context = unshape(context) # (bs, qlen, dim)
389
+ outputs = (self.out_lin(context),)
390
+
391
+ if output_attentions:
392
+ outputs = outputs + (weights,)
393
+
394
+ return outputs
395
+
396
+ def build(self, input_shape=None):
397
+ if self.built:
398
+ return
399
+ self.built = True
400
+ if getattr(self, "q_lin", None) is not None:
401
+ with tf.name_scope(self.q_lin.name):
402
+ self.q_lin.build([None, None, self.dim])
403
+ if getattr(self, "k_lin", None) is not None:
404
+ with tf.name_scope(self.k_lin.name):
405
+ self.k_lin.build([None, None, self.dim])
406
+ if getattr(self, "v_lin", None) is not None:
407
+ with tf.name_scope(self.v_lin.name):
408
+ self.v_lin.build([None, None, self.dim])
409
+ if getattr(self, "out_lin", None) is not None:
410
+ with tf.name_scope(self.out_lin.name):
411
+ self.out_lin.build([None, None, self.dim])
412
+
413
+
414
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMTransformerFFN
415
+ class TFFlaubertTransformerFFN(keras.layers.Layer):
416
+ def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):
417
+ super().__init__(**kwargs)
418
+
419
+ self.lin1 = keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1")
420
+ self.lin2 = keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2")
421
+ self.act = get_tf_activation("gelu") if config.gelu_activation else get_tf_activation("relu")
422
+ self.dropout = keras.layers.Dropout(config.dropout)
423
+ self.in_dim = in_dim
424
+ self.dim_hidden = dim_hidden
425
+
426
+ def call(self, input, training=False):
427
+ x = self.lin1(input)
428
+ x = self.act(x)
429
+ x = self.lin2(x)
430
+ x = self.dropout(x, training=training)
431
+
432
+ return x
433
+
434
+ def build(self, input_shape=None):
435
+ if self.built:
436
+ return
437
+ self.built = True
438
+ if getattr(self, "lin1", None) is not None:
439
+ with tf.name_scope(self.lin1.name):
440
+ self.lin1.build([None, None, self.in_dim])
441
+ if getattr(self, "lin2", None) is not None:
442
+ with tf.name_scope(self.lin2.name):
443
+ self.lin2.build([None, None, self.dim_hidden])
444
+
445
+
446
+ @keras_serializable
447
+ class TFFlaubertMainLayer(keras.layers.Layer):
448
+ config_class = FlaubertConfig
449
+
450
+ def __init__(self, config, **kwargs):
451
+ super().__init__(**kwargs)
452
+
453
+ self.config = config
454
+ self.n_heads = config.n_heads
455
+ self.n_langs = config.n_langs
456
+ self.dim = config.emb_dim
457
+ self.hidden_dim = self.dim * 4
458
+ self.n_words = config.n_words
459
+ self.pad_index = config.pad_index
460
+ self.causal = config.causal
461
+ self.n_layers = config.n_layers
462
+ self.use_lang_emb = config.use_lang_emb
463
+ self.layerdrop = getattr(config, "layerdrop", 0.0)
464
+ self.pre_norm = getattr(config, "pre_norm", False)
465
+ self.output_attentions = config.output_attentions
466
+ self.output_hidden_states = config.output_hidden_states
467
+ self.return_dict = config.use_return_dict
468
+ self.max_position_embeddings = config.max_position_embeddings
469
+ self.embed_init_std = config.embed_init_std
470
+ self.dropout = keras.layers.Dropout(config.dropout)
471
+ self.embeddings = TFSharedEmbeddings(
472
+ self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings"
473
+ )
474
+ self.layer_norm_emb = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb")
475
+ self.attentions = []
476
+ self.layer_norm1 = []
477
+ self.ffns = []
478
+ self.layer_norm2 = []
479
+
480
+ for i in range(self.n_layers):
481
+ self.attentions.append(
482
+ TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f"attentions_._{i}")
483
+ )
484
+ self.layer_norm1.append(
485
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm1_._{i}")
486
+ )
487
+ # if self.is_decoder:
488
+ # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
489
+ # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
490
+ self.ffns.append(
491
+ TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f"ffns_._{i}")
492
+ )
493
+ self.layer_norm2.append(
494
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm2_._{i}")
495
+ )
496
+
497
+ def build(self, input_shape=None):
498
+ with tf.name_scope("position_embeddings"):
499
+ self.position_embeddings = self.add_weight(
500
+ name="embeddings",
501
+ shape=[self.max_position_embeddings, self.dim],
502
+ initializer=get_initializer(self.embed_init_std),
503
+ )
504
+
505
+ if self.n_langs > 1 and self.use_lang_emb:
506
+ with tf.name_scope("lang_embeddings"):
507
+ self.lang_embeddings = self.add_weight(
508
+ name="embeddings",
509
+ shape=[self.n_langs, self.dim],
510
+ initializer=get_initializer(self.embed_init_std),
511
+ )
512
+
513
+ if self.built:
514
+ return
515
+ self.built = True
516
+ if getattr(self, "embeddings", None) is not None:
517
+ with tf.name_scope(self.embeddings.name):
518
+ self.embeddings.build(None)
519
+ if getattr(self, "layer_norm_emb", None) is not None:
520
+ with tf.name_scope(self.layer_norm_emb.name):
521
+ self.layer_norm_emb.build([None, None, self.dim])
522
+ for layer in self.attentions:
523
+ with tf.name_scope(layer.name):
524
+ layer.build(None)
525
+ for layer in self.layer_norm1:
526
+ with tf.name_scope(layer.name):
527
+ layer.build([None, None, self.dim])
528
+ for layer in self.ffns:
529
+ with tf.name_scope(layer.name):
530
+ layer.build(None)
531
+ for layer in self.layer_norm2:
532
+ with tf.name_scope(layer.name):
533
+ layer.build([None, None, self.dim])
534
+
535
+ def get_input_embeddings(self):
536
+ return self.embeddings
537
+
538
+ def set_input_embeddings(self, value):
539
+ self.embeddings.weight = value
540
+ self.embeddings.vocab_size = shape_list(value)[0]
541
+
542
+ @unpack_inputs
543
+ def call(
544
+ self,
545
+ input_ids: np.ndarray | tf.Tensor | None = None,
546
+ attention_mask: np.ndarray | tf.Tensor | None = None,
547
+ langs: np.ndarray | tf.Tensor | None = None,
548
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
549
+ position_ids: np.ndarray | tf.Tensor | None = None,
550
+ lengths: np.ndarray | tf.Tensor | None = None,
551
+ cache: Optional[Dict[str, tf.Tensor]] = None,
552
+ head_mask: np.ndarray | tf.Tensor | None = None,
553
+ inputs_embeds: tf.Tensor | None = None,
554
+ output_attentions: Optional[bool] = None,
555
+ output_hidden_states: Optional[bool] = None,
556
+ return_dict: Optional[bool] = None,
557
+ training: Optional[bool] = False,
558
+ ) -> Union[Tuple, TFBaseModelOutput]:
559
+ # removed: src_enc=None, src_len=None
560
+
561
+ if input_ids is not None and inputs_embeds is not None:
562
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
563
+ elif input_ids is not None:
564
+ bs, slen = shape_list(input_ids)
565
+ elif inputs_embeds is not None:
566
+ bs, slen = shape_list(inputs_embeds)[:2]
567
+ else:
568
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
569
+
570
+ if lengths is None:
571
+ if input_ids is not None:
572
+ lengths = tf.reduce_sum(
573
+ tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1
574
+ )
575
+ else:
576
+ lengths = tf.convert_to_tensor([slen] * bs)
577
+ # mask = input_ids != self.pad_index
578
+
579
+ # check inputs
580
+ # assert shape_list(lengths)[0] == bs
581
+ (
582
+ tf.debugging.assert_equal(shape_list(lengths)[0], bs),
583
+ f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched",
584
+ )
585
+ # assert lengths.max().item() <= slen
586
+ # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
587
+ # assert (src_enc is None) == (src_len is None)
588
+ # if src_enc is not None:
589
+ # assert self.is_decoder
590
+ # assert src_enc.size(0) == bs
591
+
592
+ # generate masks
593
+ mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
594
+ # if self.is_decoder and src_enc is not None:
595
+ # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
596
+
597
+ # position_ids
598
+ if position_ids is None:
599
+ position_ids = tf.expand_dims(tf.range(slen), axis=0)
600
+ position_ids = tf.tile(position_ids, (bs, 1))
601
+
602
+ # assert shape_list(position_ids) == [bs, slen] # (slen, bs)
603
+ (
604
+ tf.debugging.assert_equal(shape_list(position_ids), [bs, slen]),
605
+ f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched",
606
+ )
607
+ # position_ids = position_ids.transpose(0, 1)
608
+
609
+ # langs
610
+ if langs is not None:
611
+ # assert shape_list(langs) == [bs, slen] # (slen, bs)
612
+ (
613
+ tf.debugging.assert_equal(shape_list(langs), [bs, slen]),
614
+ f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched",
615
+ )
616
+ # langs = langs.transpose(0, 1)
617
+
618
+ # Prepare head mask if needed
619
+ # 1.0 in head_mask indicate we keep the head
620
+ # attention_probs has shape bsz x n_heads x N x N
621
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
622
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
623
+ if head_mask is not None:
624
+ raise NotImplementedError
625
+ else:
626
+ head_mask = [None] * self.n_layers
627
+
628
+ # do not recompute cached elements
629
+ if cache is not None and input_ids is not None:
630
+ _slen = slen - cache["slen"]
631
+ input_ids = input_ids[:, -_slen:]
632
+ position_ids = position_ids[:, -_slen:]
633
+ if langs is not None:
634
+ langs = langs[:, -_slen:]
635
+ mask = mask[:, -_slen:]
636
+ attn_mask = attn_mask[:, -_slen:]
637
+
638
+ # embeddings
639
+ if inputs_embeds is None:
640
+ check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size)
641
+ inputs_embeds = self.embeddings(input_ids)
642
+
643
+ tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids)
644
+
645
+ if langs is not None and self.use_lang_emb:
646
+ tensor = tensor + tf.gather(self.lang_embeddings, langs)
647
+ if token_type_ids is not None:
648
+ tensor = tensor + self.embeddings(token_type_ids)
649
+
650
+ tensor = self.layer_norm_emb(tensor)
651
+ tensor = self.dropout(tensor, training=training)
652
+ mask = tf.cast(mask, dtype=tensor.dtype)
653
+ tensor = tensor * tf.expand_dims(mask, axis=-1)
654
+
655
+ # hidden_states and attentions cannot be None in graph mode.
656
+ hidden_states = () if output_hidden_states else None
657
+ attentions = () if output_attentions else None
658
+
659
+ # transformer layers
660
+ for i in range(self.n_layers):
661
+ # LayerDrop
662
+ dropout_probability = random.uniform(0, 1)
663
+
664
+ if training and (dropout_probability < self.layerdrop):
665
+ continue
666
+
667
+ if output_hidden_states:
668
+ hidden_states = hidden_states + (tensor,)
669
+
670
+ # self attention
671
+ if not self.pre_norm:
672
+ attn_outputs = self.attentions[i](
673
+ tensor,
674
+ attn_mask,
675
+ None,
676
+ cache,
677
+ head_mask[i],
678
+ output_attentions,
679
+ training=training,
680
+ )
681
+ attn = attn_outputs[0]
682
+
683
+ if output_attentions:
684
+ attentions = attentions + (attn_outputs[1],)
685
+
686
+ attn = self.dropout(attn, training=training)
687
+ tensor = tensor + attn
688
+ tensor = self.layer_norm1[i](tensor)
689
+ else:
690
+ tensor_normalized = self.layer_norm1[i](tensor)
691
+ attn_outputs = self.attentions[i](
692
+ tensor_normalized,
693
+ attn_mask,
694
+ None,
695
+ cache,
696
+ head_mask[i],
697
+ output_attentions,
698
+ training=training,
699
+ )
700
+ attn = attn_outputs[0]
701
+
702
+ if output_attentions:
703
+ attentions = attentions + (attn_outputs[1],)
704
+
705
+ attn = self.dropout(attn, training=training)
706
+ tensor = tensor + attn
707
+
708
+ # encoder attention (for decoder only)
709
+ # if self.is_decoder and src_enc is not None:
710
+ # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
711
+ # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
712
+ # tensor = tensor + attn
713
+ # tensor = self.layer_norm15[i](tensor)
714
+
715
+ # FFN
716
+ if not self.pre_norm:
717
+ tensor = tensor + self.ffns[i](tensor)
718
+ tensor = self.layer_norm2[i](tensor)
719
+ else:
720
+ tensor_normalized = self.layer_norm2[i](tensor)
721
+ tensor = tensor + self.ffns[i](tensor_normalized)
722
+
723
+ tensor = tensor * tf.expand_dims(mask, axis=-1)
724
+
725
+ # Add last hidden state
726
+ if output_hidden_states:
727
+ hidden_states = hidden_states + (tensor,)
728
+
729
+ # update cache length
730
+ if cache is not None:
731
+ cache["slen"] += tensor.size(1)
732
+
733
+ # move back sequence length to dimension 0
734
+ # tensor = tensor.transpose(0, 1)
735
+
736
+ if not return_dict:
737
+ return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
738
+
739
+ return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
740
+
741
+
742
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer
743
+ class TFFlaubertPredLayer(keras.layers.Layer):
744
+ """
745
+ Prediction layer (cross_entropy or adaptive_softmax).
746
+ """
747
+
748
+ def __init__(self, config, input_embeddings, **kwargs):
749
+ super().__init__(**kwargs)
750
+
751
+ self.asm = config.asm
752
+ self.n_words = config.n_words
753
+ self.pad_index = config.pad_index
754
+
755
+ if config.asm is False:
756
+ self.input_embeddings = input_embeddings
757
+ else:
758
+ raise NotImplementedError
759
+ # self.proj = nn.AdaptiveLogSoftmaxWithLoss(
760
+ # in_features=dim,
761
+ # n_classes=config.n_words,
762
+ # cutoffs=config.asm_cutoffs,
763
+ # div_value=config.asm_div_value,
764
+ # head_bias=True, # default is False
765
+ # )
766
+
767
+ def build(self, input_shape):
768
+ # The output weights are the same as the input embeddings, but there is an output-only bias for each token.
769
+ self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias")
770
+
771
+ super().build(input_shape)
772
+
773
+ def get_output_embeddings(self):
774
+ return self.input_embeddings
775
+
776
+ def set_output_embeddings(self, value):
777
+ self.input_embeddings.weight = value
778
+ self.input_embeddings.vocab_size = shape_list(value)[0]
779
+
780
+ def get_bias(self):
781
+ return {"bias": self.bias}
782
+
783
+ def set_bias(self, value):
784
+ self.bias = value["bias"]
785
+ self.vocab_size = shape_list(value["bias"])[0]
786
+
787
+ def call(self, hidden_states):
788
+ hidden_states = self.input_embeddings(hidden_states, mode="linear")
789
+ hidden_states = hidden_states + self.bias
790
+
791
+ return hidden_states
792
+
793
+
794
+ @dataclass
795
+ class TFFlaubertWithLMHeadModelOutput(ModelOutput):
796
+ """
797
+ Base class for [`TFFlaubertWithLMHeadModel`] outputs.
798
+
799
+ Args:
800
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
801
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
802
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
803
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
804
+ `(batch_size, sequence_length, hidden_size)`.
805
+
806
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
807
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
808
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
809
+ sequence_length)`.
810
+
811
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
812
+ heads.
813
+ """
814
+
815
+ logits: tf.Tensor = None
816
+ hidden_states: Tuple[tf.Tensor] | None = None
817
+ attentions: Tuple[tf.Tensor] | None = None
818
+
819
+
820
+ @add_start_docstrings(
821
+ """
822
+ The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
823
+ embeddings).
824
+ """,
825
+ FLAUBERT_START_DOCSTRING,
826
+ )
827
+ class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel):
828
+ def __init__(self, config, *inputs, **kwargs):
829
+ super().__init__(config, *inputs, **kwargs)
830
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
831
+ self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj")
832
+ # Flaubert does not have past caching features
833
+ self.supports_xla_generation = False
834
+
835
+ def get_lm_head(self):
836
+ return self.pred_layer
837
+
838
+ def get_prefix_bias_name(self):
839
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
840
+ return self.name + "/" + self.pred_layer.name
841
+
842
+ def prepare_inputs_for_generation(self, inputs, **kwargs):
843
+ mask_token_id = self.config.mask_token_id
844
+ lang_id = self.config.lang_id
845
+
846
+ effective_batch_size = inputs.shape[0]
847
+ mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id
848
+ inputs = tf.concat([inputs, mask_token], axis=1)
849
+
850
+ if lang_id is not None:
851
+ langs = tf.ones_like(inputs) * lang_id
852
+ else:
853
+ langs = None
854
+ return {"input_ids": inputs, "langs": langs}
855
+
856
+ @unpack_inputs
857
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
858
+ @add_code_sample_docstrings(
859
+ checkpoint=_CHECKPOINT_FOR_DOC,
860
+ output_type=TFFlaubertWithLMHeadModelOutput,
861
+ config_class=_CONFIG_FOR_DOC,
862
+ )
863
+ def call(
864
+ self,
865
+ input_ids: np.ndarray | tf.Tensor | None = None,
866
+ attention_mask: np.ndarray | tf.Tensor | None = None,
867
+ langs: np.ndarray | tf.Tensor | None = None,
868
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
869
+ position_ids: np.ndarray | tf.Tensor | None = None,
870
+ lengths: np.ndarray | tf.Tensor | None = None,
871
+ cache: Optional[Dict[str, tf.Tensor]] = None,
872
+ head_mask: np.ndarray | tf.Tensor | None = None,
873
+ inputs_embeds: tf.Tensor | None = None,
874
+ output_attentions: Optional[bool] = None,
875
+ output_hidden_states: Optional[bool] = None,
876
+ return_dict: Optional[bool] = None,
877
+ training: Optional[bool] = False,
878
+ ) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]:
879
+ transformer_outputs = self.transformer(
880
+ input_ids=input_ids,
881
+ attention_mask=attention_mask,
882
+ langs=langs,
883
+ token_type_ids=token_type_ids,
884
+ position_ids=position_ids,
885
+ lengths=lengths,
886
+ cache=cache,
887
+ head_mask=head_mask,
888
+ inputs_embeds=inputs_embeds,
889
+ output_attentions=output_attentions,
890
+ output_hidden_states=output_hidden_states,
891
+ return_dict=return_dict,
892
+ training=training,
893
+ )
894
+ output = transformer_outputs[0]
895
+ outputs = self.pred_layer(output)
896
+
897
+ if not return_dict:
898
+ return (outputs,) + transformer_outputs[1:]
899
+
900
+ return TFFlaubertWithLMHeadModelOutput(
901
+ logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions
902
+ )
903
+
904
+ def build(self, input_shape=None):
905
+ if self.built:
906
+ return
907
+ self.built = True
908
+ if getattr(self, "transformer", None) is not None:
909
+ with tf.name_scope(self.transformer.name):
910
+ self.transformer.build(None)
911
+ if getattr(self, "pred_layer", None) is not None:
912
+ with tf.name_scope(self.pred_layer.name):
913
+ self.pred_layer.build(None)
914
+
915
+
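
As a quick orientation for the class above, here is a minimal usage sketch of `TFFlaubertWithLMHeadModel`. The checkpoint name `flaubert/flaubert_base_cased` is an assumption (a public FlauBERT checkpoint on the Hub), not something defined in this file.

# Hedged sketch: a forward pass through the LM head; the checkpoint name is assumed.
from transformers import FlaubertTokenizer, TFFlaubertWithLMHeadModel

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertWithLMHeadModel.from_pretrained("flaubert/flaubert_base_cased")

# "<special1>" is the mask token in FlauBERT/XLM-style vocabularies.
inputs = tokenizer("Le camembert est <special1> !", return_tensors="tf")
outputs = model(**inputs)           # TFFlaubertWithLMHeadModelOutput
print(outputs.logits.shape)         # (batch_size, sequence_length, vocab_size)
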
916
+ @add_start_docstrings(
917
+ """
918
+ Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
919
+ e.g. for GLUE tasks.
920
+ """,
921
+ FLAUBERT_START_DOCSTRING,
922
+ )
923
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
924
+ class TFFlaubertForSequenceClassification(TFFlaubertPreTrainedModel, TFSequenceClassificationLoss):
925
+ def __init__(self, config, *inputs, **kwargs):
926
+ super().__init__(config, *inputs, **kwargs)
927
+ self.num_labels = config.num_labels
928
+
929
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
930
+ self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
931
+
932
+ @unpack_inputs
933
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
934
+ @add_code_sample_docstrings(
935
+ checkpoint=_CHECKPOINT_FOR_DOC,
936
+ output_type=TFSequenceClassifierOutput,
937
+ config_class=_CONFIG_FOR_DOC,
938
+ )
939
+ def call(
940
+ self,
941
+ input_ids: TFModelInputType | None = None,
942
+ attention_mask: np.ndarray | tf.Tensor | None = None,
943
+ langs: np.ndarray | tf.Tensor | None = None,
944
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
945
+ position_ids: np.ndarray | tf.Tensor | None = None,
946
+ lengths: np.ndarray | tf.Tensor | None = None,
947
+ cache: Optional[Dict[str, tf.Tensor]] = None,
948
+ head_mask: np.ndarray | tf.Tensor | None = None,
949
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
950
+ output_attentions: Optional[bool] = None,
951
+ output_hidden_states: Optional[bool] = None,
952
+ return_dict: Optional[bool] = None,
953
+ labels: np.ndarray | tf.Tensor | None = None,
954
+ training: bool = False,
955
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
956
+ r"""
957
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
958
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
959
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
960
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
961
+ """
962
+ transformer_outputs = self.transformer(
963
+ input_ids=input_ids,
964
+ attention_mask=attention_mask,
965
+ langs=langs,
966
+ token_type_ids=token_type_ids,
967
+ position_ids=position_ids,
968
+ lengths=lengths,
969
+ cache=cache,
970
+ head_mask=head_mask,
971
+ inputs_embeds=inputs_embeds,
972
+ output_attentions=output_attentions,
973
+ output_hidden_states=output_hidden_states,
974
+ return_dict=return_dict,
975
+ training=training,
976
+ )
977
+ output = transformer_outputs[0]
978
+
979
+ logits = self.sequence_summary(output)
980
+
981
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
982
+
983
+ if not return_dict:
984
+ output = (logits,) + transformer_outputs[1:]
985
+ return ((loss,) + output) if loss is not None else output
986
+
987
+ return TFSequenceClassifierOutput(
988
+ loss=loss,
989
+ logits=logits,
990
+ hidden_states=transformer_outputs.hidden_states,
991
+ attentions=transformer_outputs.attentions,
992
+ )
993
+
994
+ def build(self, input_shape=None):
995
+ if self.built:
996
+ return
997
+ self.built = True
998
+ if getattr(self, "transformer", None) is not None:
999
+ with tf.name_scope(self.transformer.name):
1000
+ self.transformer.build(None)
1001
+ if getattr(self, "sequence_summary", None) is not None:
1002
+ with tf.name_scope(self.sequence_summary.name):
1003
+ self.sequence_summary.build(None)
1004
+
1005
+
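
The `labels` docstring above determines the loss: mean-squared error when `config.num_labels == 1`, cross-entropy otherwise. A hedged sketch of a labelled forward pass follows; the checkpoint name and label values are illustrative assumptions.

# Hedged sketch: sequence classification with labels (num_labels > 1 -> cross-entropy).
import tensorflow as tf
from transformers import FlaubertTokenizer, TFFlaubertForSequenceClassification

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertForSequenceClassification.from_pretrained(
    "flaubert/flaubert_base_cased", num_labels=2
)

inputs = tokenizer(["très bon film", "film décevant"], padding=True, return_tensors="tf")
labels = tf.constant([1, 0])
outputs = model(**inputs, labels=labels)
print(outputs.loss, outputs.logits.shape)   # loss tensor and logits of shape (2, 2)
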
1006
+ @add_start_docstrings(
1007
+ """
1008
+ Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1009
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1010
+ """,
1011
+ FLAUBERT_START_DOCSTRING,
1012
+ )
1013
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1014
+ class TFFlaubertForQuestionAnsweringSimple(TFFlaubertPreTrainedModel, TFQuestionAnsweringLoss):
1015
+ def __init__(self, config, *inputs, **kwargs):
1016
+ super().__init__(config, *inputs, **kwargs)
1017
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
1018
+ self.qa_outputs = keras.layers.Dense(
1019
+ config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs"
1020
+ )
1021
+ self.config = config
1022
+
1023
+ @unpack_inputs
1024
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1025
+ @add_code_sample_docstrings(
1026
+ checkpoint=_CHECKPOINT_FOR_DOC,
1027
+ output_type=TFQuestionAnsweringModelOutput,
1028
+ config_class=_CONFIG_FOR_DOC,
1029
+ )
1030
+ def call(
1031
+ self,
1032
+ input_ids: TFModelInputType | None = None,
1033
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1034
+ langs: np.ndarray | tf.Tensor | None = None,
1035
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1036
+ position_ids: np.ndarray | tf.Tensor | None = None,
1037
+ lengths: np.ndarray | tf.Tensor | None = None,
1038
+ cache: Optional[Dict[str, tf.Tensor]] = None,
1039
+ head_mask: np.ndarray | tf.Tensor | None = None,
1040
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1041
+ output_attentions: Optional[bool] = None,
1042
+ output_hidden_states: Optional[bool] = None,
1043
+ return_dict: Optional[bool] = None,
1044
+ start_positions: np.ndarray | tf.Tensor | None = None,
1045
+ end_positions: np.ndarray | tf.Tensor | None = None,
1046
+ training: bool = False,
1047
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1048
+ r"""
1049
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1050
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1051
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1052
+ are not taken into account for computing the loss.
1053
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1054
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1055
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1056
+ are not taken into account for computing the loss.
1057
+ """
1058
+ transformer_outputs = self.transformer(
1059
+ input_ids=input_ids,
1060
+ attention_mask=attention_mask,
1061
+ langs=langs,
1062
+ token_type_ids=token_type_ids,
1063
+ position_ids=position_ids,
1064
+ lengths=lengths,
1065
+ cache=cache,
1066
+ head_mask=head_mask,
1067
+ inputs_embeds=inputs_embeds,
1068
+ output_attentions=output_attentions,
1069
+ output_hidden_states=output_hidden_states,
1070
+ return_dict=return_dict,
1071
+ training=training,
1072
+ )
1073
+ sequence_output = transformer_outputs[0]
1074
+
1075
+ logits = self.qa_outputs(sequence_output)
1076
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1077
+ start_logits = tf.squeeze(start_logits, axis=-1)
1078
+ end_logits = tf.squeeze(end_logits, axis=-1)
1079
+
1080
+ loss = None
1081
+ if start_positions is not None and end_positions is not None:
1082
+ labels = {"start_position": start_positions}
1083
+ labels["end_position"] = end_positions
1084
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1085
+
1086
+ if not return_dict:
1087
+ output = (start_logits, end_logits) + transformer_outputs[1:]
1088
+ return ((loss,) + output) if loss is not None else output
1089
+
1090
+ return TFQuestionAnsweringModelOutput(
1091
+ loss=loss,
1092
+ start_logits=start_logits,
1093
+ end_logits=end_logits,
1094
+ hidden_states=transformer_outputs.hidden_states,
1095
+ attentions=transformer_outputs.attentions,
1096
+ )
1097
+
1098
+ def build(self, input_shape=None):
1099
+ if self.built:
1100
+ return
1101
+ self.built = True
1102
+ if getattr(self, "transformer", None) is not None:
1103
+ with tf.name_scope(self.transformer.name):
1104
+ self.transformer.build(None)
1105
+ if getattr(self, "qa_outputs", None) is not None:
1106
+ with tf.name_scope(self.qa_outputs.name):
1107
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1108
+
1109
+
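
The QA head above emits one start and one end score per token via the `tf.split` on the last axis. Turning those logits into a text span is left to the caller; the greedy decoding below is only a common convention, sketched under the assumption of a FlauBERT checkpoint (with an untrained `qa_outputs` head the decoded span is arbitrary).

# Hedged sketch: greedy span decoding from start/end logits.
import tensorflow as tf
from transformers import FlaubertTokenizer, TFFlaubertForQuestionAnsweringSimple

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertForQuestionAnsweringSimple.from_pretrained("flaubert/flaubert_base_cased")

question, context = "Qui a écrit Les Misérables ?", "Victor Hugo a écrit Les Misérables."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
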
1110
+ @add_start_docstrings(
1111
+ """
1112
+ Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1113
+ Named-Entity-Recognition (NER) tasks.
1114
+ """,
1115
+ FLAUBERT_START_DOCSTRING,
1116
+ )
1117
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1118
+ class TFFlaubertForTokenClassification(TFFlaubertPreTrainedModel, TFTokenClassificationLoss):
1119
+ def __init__(self, config, *inputs, **kwargs):
1120
+ super().__init__(config, *inputs, **kwargs)
1121
+ self.num_labels = config.num_labels
1122
+
1123
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
1124
+ self.dropout = keras.layers.Dropout(config.dropout)
1125
+ self.classifier = keras.layers.Dense(
1126
+ config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier"
1127
+ )
1128
+ self.config = config
1129
+
1130
+ @unpack_inputs
1131
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1132
+ @add_code_sample_docstrings(
1133
+ checkpoint=_CHECKPOINT_FOR_DOC,
1134
+ output_type=TFTokenClassifierOutput,
1135
+ config_class=_CONFIG_FOR_DOC,
1136
+ )
1137
+ def call(
1138
+ self,
1139
+ input_ids: TFModelInputType | None = None,
1140
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1141
+ langs: np.ndarray | tf.Tensor | None = None,
1142
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1143
+ position_ids: np.ndarray | tf.Tensor | None = None,
1144
+ lengths: np.ndarray | tf.Tensor | None = None,
1145
+ cache: Optional[Dict[str, tf.Tensor]] = None,
1146
+ head_mask: np.ndarray | tf.Tensor | None = None,
1147
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1148
+ output_attentions: Optional[bool] = None,
1149
+ output_hidden_states: Optional[bool] = None,
1150
+ return_dict: Optional[bool] = None,
1151
+ labels: np.ndarray | tf.Tensor | None = None,
1152
+ training: bool = False,
1153
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1154
+ r"""
1155
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1156
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1157
+ """
1158
+ transformer_outputs = self.transformer(
1159
+ input_ids=input_ids,
1160
+ attention_mask=attention_mask,
1161
+ langs=langs,
1162
+ token_type_ids=token_type_ids,
1163
+ position_ids=position_ids,
1164
+ lengths=lengths,
1165
+ cache=cache,
1166
+ head_mask=head_mask,
1167
+ inputs_embeds=inputs_embeds,
1168
+ output_attentions=output_attentions,
1169
+ output_hidden_states=output_hidden_states,
1170
+ return_dict=return_dict,
1171
+ training=training,
1172
+ )
1173
+ sequence_output = transformer_outputs[0]
1174
+
1175
+ sequence_output = self.dropout(sequence_output, training=training)
1176
+ logits = self.classifier(sequence_output)
1177
+
1178
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1179
+
1180
+ if not return_dict:
1181
+ output = (logits,) + transformer_outputs[1:]
1182
+ return ((loss,) + output) if loss is not None else output
1183
+
1184
+ return TFTokenClassifierOutput(
1185
+ loss=loss,
1186
+ logits=logits,
1187
+ hidden_states=transformer_outputs.hidden_states,
1188
+ attentions=transformer_outputs.attentions,
1189
+ )
1190
+
1191
+ def build(self, input_shape=None):
1192
+ if self.built:
1193
+ return
1194
+ self.built = True
1195
+ if getattr(self, "transformer", None) is not None:
1196
+ with tf.name_scope(self.transformer.name):
1197
+ self.transformer.build(None)
1198
+ if getattr(self, "classifier", None) is not None:
1199
+ with tf.name_scope(self.classifier.name):
1200
+ self.classifier.build([None, None, self.config.hidden_size])
1201
+
1202
+
1203
+ @add_start_docstrings(
1204
+ """
1205
+ Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1206
+ softmax) e.g. for RocStories/SWAG tasks.
1207
+ """,
1208
+ FLAUBERT_START_DOCSTRING,
1209
+ )
1210
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1211
+ class TFFlaubertForMultipleChoice(TFFlaubertPreTrainedModel, TFMultipleChoiceLoss):
1212
+ def __init__(self, config, *inputs, **kwargs):
1213
+ super().__init__(config, *inputs, **kwargs)
1214
+
1215
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
1216
+ self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
1217
+ self.logits_proj = keras.layers.Dense(
1218
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
1219
+ )
1220
+ self.config = config
1221
+
1222
+ @property
1223
+ def dummy_inputs(self):
1224
+ """
1225
+ Dummy inputs to build the network.
1226
+
1227
+ Returns:
1228
+ tf.Tensor with dummy inputs
1229
+ """
1230
+ # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed
1231
+ if self.config.use_lang_emb and self.config.n_langs > 1:
1232
+ return {
1233
+ "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
1234
+ "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
1235
+ }
1236
+ else:
1237
+ return {
1238
+ "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
1239
+ }
1240
+
1241
+ @unpack_inputs
1242
+ @add_start_docstrings_to_model_forward(
1243
+ FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1244
+ )
1245
+ @add_code_sample_docstrings(
1246
+ checkpoint=_CHECKPOINT_FOR_DOC,
1247
+ output_type=TFMultipleChoiceModelOutput,
1248
+ config_class=_CONFIG_FOR_DOC,
1249
+ )
1250
+ def call(
1251
+ self,
1252
+ input_ids: TFModelInputType | None = None,
1253
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1254
+ langs: np.ndarray | tf.Tensor | None = None,
1255
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1256
+ position_ids: np.ndarray | tf.Tensor | None = None,
1257
+ lengths: np.ndarray | tf.Tensor | None = None,
1258
+ cache: Optional[Dict[str, tf.Tensor]] = None,
1259
+ head_mask: np.ndarray | tf.Tensor | None = None,
1260
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1261
+ output_attentions: Optional[bool] = None,
1262
+ output_hidden_states: Optional[bool] = None,
1263
+ return_dict: Optional[bool] = None,
1264
+ labels: np.ndarray | tf.Tensor | None = None,
1265
+ training: bool = False,
1266
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1267
+ if input_ids is not None:
1268
+ num_choices = shape_list(input_ids)[1]
1269
+ seq_length = shape_list(input_ids)[2]
1270
+ else:
1271
+ num_choices = shape_list(inputs_embeds)[1]
1272
+ seq_length = shape_list(inputs_embeds)[2]
1273
+
1274
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1275
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1276
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1277
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1278
+ flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None
1279
+ flat_inputs_embeds = (
1280
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1281
+ if inputs_embeds is not None
1282
+ else None
1283
+ )
1284
+
1285
+ if lengths is not None:
1286
+ logger.warning(
1287
+ "The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the "
1288
+ "attention mask instead.",
1289
+ )
1290
+ lengths = None
1291
+
1292
+ transformer_outputs = self.transformer(
1293
+ flat_input_ids,
1294
+ flat_attention_mask,
1295
+ flat_langs,
1296
+ flat_token_type_ids,
1297
+ flat_position_ids,
1298
+ lengths,
1299
+ cache,
1300
+ head_mask,
1301
+ flat_inputs_embeds,
1302
+ output_attentions,
1303
+ output_hidden_states,
1304
+ return_dict=return_dict,
1305
+ training=training,
1306
+ )
1307
+ output = transformer_outputs[0]
1308
+ logits = self.sequence_summary(output)
1309
+ logits = self.logits_proj(logits)
1310
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1311
+
1312
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1313
+
1314
+ if not return_dict:
1315
+ output = (reshaped_logits,) + transformer_outputs[1:]
1316
+ return ((loss,) + output) if loss is not None else output
1317
+
1318
+ return TFMultipleChoiceModelOutput(
1319
+ loss=loss,
1320
+ logits=reshaped_logits,
1321
+ hidden_states=transformer_outputs.hidden_states,
1322
+ attentions=transformer_outputs.attentions,
1323
+ )
1324
+
1325
+ def build(self, input_shape=None):
1326
+ if self.built:
1327
+ return
1328
+ self.built = True
1329
+ if getattr(self, "transformer", None) is not None:
1330
+ with tf.name_scope(self.transformer.name):
1331
+ self.transformer.build(None)
1332
+ if getattr(self, "sequence_summary", None) is not None:
1333
+ with tf.name_scope(self.sequence_summary.name):
1334
+ self.sequence_summary.build(None)
1335
+ if getattr(self, "logits_proj", None) is not None:
1336
+ with tf.name_scope(self.logits_proj.name):
1337
+ self.logits_proj.build([None, None, self.config.num_labels])
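
The multiple-choice head expects inputs shaped (batch_size, num_choices, sequence_length); as the reshapes above show, it flattens them to (batch_size * num_choices, sequence_length) and then folds the per-choice scores back into (batch_size, num_choices). A hedged sketch of preparing such inputs, with an assumed checkpoint name:

# Hedged sketch: one prompt with two candidate endings for TFFlaubertForMultipleChoice.
import tensorflow as tf
from transformers import FlaubertTokenizer, TFFlaubertForMultipleChoice

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertForMultipleChoice.from_pretrained("flaubert/flaubert_base_cased")

prompt = "Il pleut, donc je prends"
choices = ["un parapluie.", "des lunettes de soleil."]
encoding = tokenizer([prompt, prompt], choices, padding=True, return_tensors="tf")

inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}   # add the num_choices axis: (1, 2, seq_len)
outputs = model(**inputs)
print(outputs.logits.shape)   # (1, 2): one score per choice
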
llmeval-env/lib/python3.10/site-packages/transformers/models/flaubert/tokenization_flaubert.py ADDED
@@ -0,0 +1,565 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Flaubert."""
16
+
17
+
18
+ import json
19
+ import os
20
+ import re
21
+ import unicodedata
22
+ from typing import List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ }
34
+
35
+
36
+ def convert_to_unicode(text):
37
+ """
38
+ Converts `text` to Unicode (if it's not already), assuming UTF-8 input.
39
+ """
40
+
41
+ def ensure_text(s, encoding="utf-8", errors="strict"):
42
+ if isinstance(s, bytes):
43
+ return s.decode(encoding, errors)
44
+ elif isinstance(s, str):
45
+ return s
46
+ else:
47
+ raise TypeError(f"not expecting type '{type(s)}'")
48
+
49
+ return ensure_text(text, encoding="utf-8", errors="ignore")
50
+
51
+
52
+ # Copied from transformers.models.xlm.tokenization_xlm.get_pairs
53
+ def get_pairs(word):
54
+ """
55
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
56
+ strings)
57
+ """
58
+ pairs = set()
59
+ prev_char = word[0]
60
+ for char in word[1:]:
61
+ pairs.add((prev_char, char))
62
+ prev_char = char
63
+ return pairs
64
+
65
+
66
+ # Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
67
+ def replace_unicode_punct(text):
68
+ """
69
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
70
+ """
71
+ text = text.replace(",", ",")
72
+ text = re.sub(r"。\s*", ". ", text)
73
+ text = text.replace("、", ",")
74
+ text = text.replace("”", '"')
75
+ text = text.replace("“", '"')
76
+ text = text.replace("∶", ":")
77
+ text = text.replace(":", ":")
78
+ text = text.replace("?", "?")
79
+ text = text.replace("《", '"')
80
+ text = text.replace("》", '"')
81
+ text = text.replace(")", ")")
82
+ text = text.replace("!", "!")
83
+ text = text.replace("(", "(")
84
+ text = text.replace(";", ";")
85
+ text = text.replace("1", "1")
86
+ text = text.replace("」", '"')
87
+ text = text.replace("「", '"')
88
+ text = text.replace("0", "0")
89
+ text = text.replace("3", "3")
90
+ text = text.replace("2", "2")
91
+ text = text.replace("5", "5")
92
+ text = text.replace("6", "6")
93
+ text = text.replace("9", "9")
94
+ text = text.replace("7", "7")
95
+ text = text.replace("8", "8")
96
+ text = text.replace("4", "4")
97
+ text = re.sub(r".\s*", ". ", text)
98
+ text = text.replace("~", "~")
99
+ text = text.replace("’", "'")
100
+ text = text.replace("…", "...")
101
+ text = text.replace("━", "-")
102
+ text = text.replace("〈", "<")
103
+ text = text.replace("〉", ">")
104
+ text = text.replace("【", "[")
105
+ text = text.replace("】", "]")
106
+ text = text.replace("%", "%")
107
+ return text
108
+
109
+
110
+ # Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
111
+ def remove_non_printing_char(text):
112
+ """
113
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
114
+ """
115
+ output = []
116
+ for char in text:
117
+ cat = unicodedata.category(char)
118
+ if cat.startswith("C"):
119
+ continue
120
+ output.append(char)
121
+ return "".join(output)
122
+
123
+
124
+ class FlaubertTokenizer(PreTrainedTokenizer):
125
+ """
126
+ Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
127
+
128
+ - Moses preprocessing and tokenization.
129
+ - Normalizing all inputs text.
130
+ - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
131
+ "__classify__") to a vocabulary.
132
+ - The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies).
133
+
134
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
135
+ this superclass for more information regarding those methods.
136
+
137
+ Args:
138
+ vocab_file (`str`):
139
+ Vocabulary file.
140
+ merges_file (`str`):
141
+ Merges file.
142
+ do_lowercase (`bool`, *optional*, defaults to `False`):
143
+ Controls lower casing.
144
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
145
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
146
+ token instead.
147
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
148
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
149
+
150
+ <Tip>
151
+
152
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
153
+ sequence. The token used is the `cls_token`.
154
+
155
+ </Tip>
156
+
157
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
158
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
159
+ sequence classification or for a text and a question for question answering. It is also used as the last
160
+ token of a sequence built with special tokens.
161
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
162
+ The token used for padding, for example when batching sequences of different lengths.
163
+ cls_token (`str`, *optional*, defaults to `"</s>"`):
164
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
165
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
166
+ mask_token (`str`, *optional*, defaults to `"<special1>"`):
167
+ The token used for masking values. This is the token used when training this model with masked language
168
+ modeling. This is the token which the model will try to predict.
169
+ additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
170
+ List of additional special tokens.
171
+ lang2id (`Dict[str, int]`, *optional*):
172
+ Dictionary mapping languages string identifiers to their IDs.
173
+ id2lang (`Dict[int, str]`, *optional*):
174
+ Dictionary mapping language IDs to their string identifiers.
175
+ """
176
+
177
+ vocab_files_names = VOCAB_FILES_NAMES
178
+
179
+ def __init__(
180
+ self,
181
+ vocab_file,
182
+ merges_file,
183
+ do_lowercase=False,
184
+ unk_token="<unk>",
185
+ bos_token="<s>",
186
+ sep_token="</s>",
187
+ pad_token="<pad>",
188
+ cls_token="</s>",
189
+ mask_token="<special1>",
190
+ additional_special_tokens=[
191
+ "<special0>",
192
+ "<special1>",
193
+ "<special2>",
194
+ "<special3>",
195
+ "<special4>",
196
+ "<special5>",
197
+ "<special6>",
198
+ "<special7>",
199
+ "<special8>",
200
+ "<special9>",
201
+ ],
202
+ lang2id=None,
203
+ id2lang=None,
204
+ **kwargs,
205
+ ):
206
+ do_lowercase_and_remove_accent = kwargs.pop("do_lowercase_and_remove_accent", None)
207
+ if do_lowercase_and_remove_accent is not None:
208
+ logger.warning(
209
+ "`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything."
210
+ " `FlaubertTokenizer` will always set it to `False`."
211
+ )
212
+ # always `False`
213
+ self.do_lowercase_and_remove_accent = False
214
+
215
+ self.do_lowercase = do_lowercase
216
+
217
+ try:
218
+ import sacremoses
219
+ except ImportError:
220
+ raise ImportError(
221
+ "You need to install sacremoses to use FlaubertTokenizer. "
222
+ "See https://pypi.org/project/sacremoses/ for installation."
223
+ )
224
+
225
+ self.sm = sacremoses
226
+
227
+ # cache of sm.MosesPunctNormalizer instance
228
+ self.cache_moses_punct_normalizer = {}
229
+ # cache of sm.MosesTokenizer instance
230
+ self.cache_moses_tokenizer = {}
231
+ self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
232
+ self.lang2id = lang2id
233
+ self.id2lang = id2lang
234
+ if lang2id is not None and id2lang is not None:
235
+ assert len(lang2id) == len(id2lang)
236
+
237
+ self.ja_word_tokenizer = None
238
+ self.zh_word_tokenizer = None
239
+
240
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
241
+ self.encoder = json.load(vocab_handle)
242
+ self.decoder = {v: k for k, v in self.encoder.items()}
243
+ with open(merges_file, encoding="utf-8") as merges_handle:
244
+ merges = merges_handle.read().split("\n")[:-1]
245
+ merges = [tuple(merge.split()[:2]) for merge in merges]
246
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
247
+ self.cache = {}
248
+
249
+ super().__init__(
250
+ unk_token=unk_token,
251
+ bos_token=bos_token,
252
+ sep_token=sep_token,
253
+ pad_token=pad_token,
254
+ cls_token=cls_token,
255
+ mask_token=mask_token,
256
+ additional_special_tokens=additional_special_tokens,
257
+ lang2id=lang2id,
258
+ id2lang=id2lang,
259
+ **kwargs,
260
+ )
261
+
262
+ @property
263
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
264
+ def do_lower_case(self):
265
+ return self.do_lowercase_and_remove_accent
266
+
267
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
268
+ def moses_punct_norm(self, text, lang):
269
+ if lang not in self.cache_moses_punct_normalizer:
270
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
271
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
272
+ else:
273
+ punct_normalizer = self.cache_moses_punct_normalizer[lang]
274
+ return punct_normalizer.normalize(text)
275
+
276
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
277
+ def moses_tokenize(self, text, lang):
278
+ if lang not in self.cache_moses_tokenizer:
279
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
280
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
281
+ else:
282
+ moses_tokenizer = self.cache_moses_tokenizer[lang]
283
+ return moses_tokenizer.tokenize(text, return_str=False, escape=False)
284
+
285
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
286
+ def moses_pipeline(self, text, lang):
287
+ text = replace_unicode_punct(text)
288
+ text = self.moses_punct_norm(text, lang)
289
+ text = remove_non_printing_char(text)
290
+ return text
291
+
292
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
293
+ def ja_tokenize(self, text):
294
+ if self.ja_word_tokenizer is None:
295
+ try:
296
+ import Mykytea
297
+
298
+ self.ja_word_tokenizer = Mykytea.Mykytea(
299
+ f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
300
+ )
301
+ except (AttributeError, ImportError):
302
+ logger.error(
303
+ "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
304
+ " (https://github.com/chezou/Mykytea-python) with the following steps"
305
+ )
306
+ logger.error("1. git clone [email protected]:neubig/kytea.git && cd kytea")
307
+ logger.error("2. autoreconf -i")
308
+ logger.error("3. ./configure --prefix=$HOME/local")
309
+ logger.error("4. make && make install")
310
+ logger.error("5. pip install kytea")
311
+ raise
312
+ return list(self.ja_word_tokenizer.getWS(text))
313
+
314
+ @property
315
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
316
+ def vocab_size(self):
317
+ return len(self.encoder)
318
+
319
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
320
+ def get_vocab(self):
321
+ return dict(self.encoder, **self.added_tokens_encoder)
322
+
323
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
324
+ def bpe(self, token):
325
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
326
+ if token in self.cache:
327
+ return self.cache[token]
328
+ pairs = get_pairs(word)
329
+
330
+ if not pairs:
331
+ return token + "</w>"
332
+
333
+ while True:
334
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
335
+ if bigram not in self.bpe_ranks:
336
+ break
337
+ first, second = bigram
338
+ new_word = []
339
+ i = 0
340
+ while i < len(word):
341
+ try:
342
+ j = word.index(first, i)
343
+ except ValueError:
344
+ new_word.extend(word[i:])
345
+ break
346
+ else:
347
+ new_word.extend(word[i:j])
348
+ i = j
349
+
350
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
351
+ new_word.append(first + second)
352
+ i += 2
353
+ else:
354
+ new_word.append(word[i])
355
+ i += 1
356
+ new_word = tuple(new_word)
357
+ word = new_word
358
+ if len(word) == 1:
359
+ break
360
+ else:
361
+ pairs = get_pairs(word)
362
+ word = " ".join(word)
363
+ if word == "\n </w>":
364
+ word = "\n</w>"
365
+ self.cache[token] = word
366
+ return word
367
+
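
To make the greedy merge loop in `bpe` concrete, here is a tiny self-contained sketch with a made-up merge table; the ranks are purely illustrative and come from no real FlauBERT vocabulary. It applies the same rule as above: repeatedly merge the adjacent pair with the lowest rank until no ranked pair remains, with `</w>` marking the end of a word.

# Hedged sketch: the core BPE merge rule on a toy example (illustrative ranks only).
toy_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}

def toy_bpe(token):
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        ranked = [p for p in pairs if p in toy_ranks]
        if not ranked:
            break
        first, second = min(ranked, key=lambda p: toy_ranks[p])
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)   # apply the lowest-ranked merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

print(toy_bpe("low"))   # "low</w>": first l+o, then lo+w</w>
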
368
+ def preprocess_text(self, text):
369
+ text = text.replace("``", '"').replace("''", '"')
370
+ text = convert_to_unicode(text)
371
+ text = unicodedata.normalize("NFC", text)
372
+
373
+ if self.do_lowercase:
374
+ text = text.lower()
375
+
376
+ return text
377
+
378
+ def _tokenize(self, text, bypass_tokenizer=False):
379
+ """
380
+ Tokenize a string given language code using Moses.
381
+
382
+ Details of tokenization:
383
+
384
+ - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
385
+ - Install with `pip install sacremoses`
386
+
387
+ Args:
388
+ - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
389
+ (bool). If True, we only apply BPE.
390
+
391
+ Returns:
392
+ List of tokens.
393
+ """
394
+ lang = "fr"
395
+ if lang and self.lang2id and lang not in self.lang2id:
396
+ logger.error(
397
+ "Supplied language code not found in lang2id mapping. Please check that your language is supported by"
398
+ " the loaded pretrained model."
399
+ )
400
+
401
+ if bypass_tokenizer:
402
+ text = text.split()
403
+ else:
404
+ text = self.preprocess_text(text)
405
+ text = self.moses_pipeline(text, lang=lang)
406
+ text = self.moses_tokenize(text, lang=lang)
407
+
408
+ split_tokens = []
409
+ for token in text:
410
+ if token:
411
+ split_tokens.extend(list(self.bpe(token).split(" ")))
412
+
413
+ return split_tokens
414
+
415
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
416
+ def _convert_token_to_id(self, token):
417
+ """Converts a token (str) in an id using the vocab."""
418
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
419
+
420
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
421
+ def _convert_id_to_token(self, index):
422
+ """Converts an index (integer) in a token (str) using the vocab."""
423
+ return self.decoder.get(index, self.unk_token)
424
+
425
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
426
+ def convert_tokens_to_string(self, tokens):
427
+ """Converts a sequence of tokens (string) in a single string."""
428
+ out_string = "".join(tokens).replace("</w>", " ").strip()
429
+ return out_string
430
+
431
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
432
+ def build_inputs_with_special_tokens(
433
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
434
+ ) -> List[int]:
435
+ """
436
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
437
+ adding special tokens. An XLM sequence has the following format:
438
+
439
+ - single sequence: `<s> X </s>`
440
+ - pair of sequences: `<s> A </s> B </s>`
441
+
442
+ Args:
443
+ token_ids_0 (`List[int]`):
444
+ List of IDs to which the special tokens will be added.
445
+ token_ids_1 (`List[int]`, *optional*):
446
+ Optional second list of IDs for sequence pairs.
447
+
448
+ Returns:
449
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
450
+
451
+ """
452
+ bos = [self.bos_token_id]
453
+ sep = [self.sep_token_id]
454
+
455
+ if token_ids_1 is None:
456
+ return bos + token_ids_0 + sep
457
+ return bos + token_ids_0 + sep + token_ids_1 + sep
458
+
459
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
460
+ def get_special_tokens_mask(
461
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
462
+ ) -> List[int]:
463
+ """
464
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
465
+ special tokens using the tokenizer `prepare_for_model` method.
466
+
467
+ Args:
468
+ token_ids_0 (`List[int]`):
469
+ List of IDs.
470
+ token_ids_1 (`List[int]`, *optional*):
471
+ Optional second list of IDs for sequence pairs.
472
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
473
+ Whether or not the token list is already formatted with special tokens for the model.
474
+
475
+ Returns:
476
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
477
+ """
478
+
479
+ if already_has_special_tokens:
480
+ return super().get_special_tokens_mask(
481
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
482
+ )
483
+
484
+ if token_ids_1 is not None:
485
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
486
+ return [1] + ([0] * len(token_ids_0)) + [1]
487
+
488
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
489
+ def create_token_type_ids_from_sequences(
490
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
491
+ ) -> List[int]:
492
+ """
493
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
494
+ pair mask has the following format:
495
+
496
+ ```
497
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
498
+ | first sequence | second sequence |
499
+ ```
500
+
501
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
502
+
503
+ Args:
504
+ token_ids_0 (`List[int]`):
505
+ List of IDs.
506
+ token_ids_1 (`List[int]`, *optional*):
507
+ Optional second list of IDs for sequence pairs.
508
+
509
+ Returns:
510
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
511
+ """
512
+ sep = [self.sep_token_id]
513
+ cls = [self.cls_token_id]
514
+ if token_ids_1 is None:
515
+ return len(cls + token_ids_0 + sep) * [0]
516
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
517
+
518
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
519
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
520
+ if not os.path.isdir(save_directory):
521
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
522
+ return
523
+ vocab_file = os.path.join(
524
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
525
+ )
526
+ merge_file = os.path.join(
527
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
528
+ )
529
+
530
+ with open(vocab_file, "w", encoding="utf-8") as f:
531
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
532
+
533
+ index = 0
534
+ with open(merge_file, "w", encoding="utf-8") as writer:
535
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
536
+ if index != token_index:
537
+ logger.warning(
538
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
539
+ " Please check that the tokenizer is not corrupted!"
540
+ )
541
+ index = token_index
542
+ writer.write(" ".join(bpe_tokens) + "\n")
543
+ index += 1
544
+
545
+ return vocab_file, merge_file
546
+
547
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
548
+ def __getstate__(self):
549
+ state = self.__dict__.copy()
550
+ state["sm"] = None
551
+ return state
552
+
553
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
554
+ def __setstate__(self, d):
555
+ self.__dict__ = d
556
+
557
+ try:
558
+ import sacremoses
559
+ except ImportError:
560
+ raise ImportError(
561
+ "You need to install sacremoses to use XLMTokenizer. "
562
+ "See https://pypi.org/project/sacremoses/ for installation."
563
+ )
564
+
565
+ self.sm = sacremoses
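
Taken together, the tokenizer in this file chains Moses preprocessing, BPE, and the `<s> A </s> B </s>` layout described in `build_inputs_with_special_tokens`. A hedged usage sketch, with the checkpoint name again assumed:

# Hedged sketch: encoding a sentence pair and inspecting the special-token layout.
from transformers import FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")

enc = tokenizer("Bonjour le monde.", "Comment ça va ?")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))   # <s> ... </s> ... </s>
print(enc["token_type_ids"])                               # 0s for the first segment, 1s for the second
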
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__init__.py ADDED
@@ -0,0 +1,112 @@
1
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {"configuration_gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig", "GPTJOnnxConfig"]}
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_gptj"] = [
34
+ "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "GPTJForCausalLM",
36
+ "GPTJForQuestionAnswering",
37
+ "GPTJForSequenceClassification",
38
+ "GPTJModel",
39
+ "GPTJPreTrainedModel",
40
+ ]
41
+
42
+ try:
43
+ if not is_tf_available():
44
+ raise OptionalDependencyNotAvailable()
45
+ except OptionalDependencyNotAvailable:
46
+ pass
47
+ else:
48
+ _import_structure["modeling_tf_gptj"] = [
49
+ "TFGPTJForCausalLM",
50
+ "TFGPTJForQuestionAnswering",
51
+ "TFGPTJForSequenceClassification",
52
+ "TFGPTJModel",
53
+ "TFGPTJPreTrainedModel",
54
+ ]
55
+
56
+ try:
57
+ if not is_flax_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ _import_structure["modeling_flax_gptj"] = [
63
+ "FlaxGPTJForCausalLM",
64
+ "FlaxGPTJModel",
65
+ "FlaxGPTJPreTrainedModel",
66
+ ]
67
+
68
+
69
+ if TYPE_CHECKING:
70
+ from .configuration_gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig, GPTJOnnxConfig
71
+
72
+ try:
73
+ if not is_torch_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .modeling_gptj import (
79
+ GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST,
80
+ GPTJForCausalLM,
81
+ GPTJForQuestionAnswering,
82
+ GPTJForSequenceClassification,
83
+ GPTJModel,
84
+ GPTJPreTrainedModel,
85
+ )
86
+
87
+ try:
88
+ if not is_tf_available():
89
+ raise OptionalDependencyNotAvailable()
90
+ except OptionalDependencyNotAvailable:
91
+ pass
92
+ else:
93
+ from .modeling_tf_gptj import (
94
+ TFGPTJForCausalLM,
95
+ TFGPTJForQuestionAnswering,
96
+ TFGPTJForSequenceClassification,
97
+ TFGPTJModel,
98
+ TFGPTJPreTrainedModel,
99
+ )
100
+
101
+ try:
102
+ if not is_flax_available():
103
+ raise OptionalDependencyNotAvailable()
104
+ except OptionalDependencyNotAvailable:
105
+ pass
106
+ else:
107
+ from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
108
+
109
+ else:
110
+ import sys
111
+
112
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
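
This __init__ follows the library's lazy-import pattern: the module object is replaced by a `_LazyModule` that only records `_import_structure`, and each framework-specific submodule is imported the first time one of its exported names is accessed (and skipped entirely when its backend is unavailable). A hedged sketch of what that looks like from the outside; the small layer/head counts are illustrative:

# Hedged sketch: names listed in _import_structure are resolved lazily, on first attribute access.
import transformers.models.gptj as gptj

print(type(gptj).__name__)                                   # _LazyModule
config = gptj.GPTJConfig(n_layer=2, n_embd=128, n_head=4)    # triggers the configuration_gptj import
print(config.hidden_size)                                    # 128, via the attribute_map alias for n_embd
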
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.59 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc ADDED
Binary file (7.67 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc ADDED
Binary file (21 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc ADDED
Binary file (38.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc ADDED
Binary file (33.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPT-J model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class GPTJConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
34
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the GPT-J
36
+ [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
37
+ [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
38
+ for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50400):
42
+ Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`GPTJModel`].
44
+ n_positions (`int`, *optional*, defaults to 2048):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ n_embd (`int`, *optional*, defaults to 4096):
48
+ Dimensionality of the embeddings and hidden states.
49
+ n_layer (`int`, *optional*, defaults to 28):
50
+ Number of hidden layers in the Transformer encoder.
51
+ n_head (`int`, *optional*, defaults to 16):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ rotary_dim (`int`, *optional*, defaults to 64):
54
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
55
+ n_inner (`int`, *optional*, defaults to None):
56
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
57
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
58
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
59
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
60
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
61
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
62
+ The dropout ratio for the embeddings.
63
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
64
+ The dropout ratio for the attention.
65
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
66
+ The epsilon to use in the layer normalization layers.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models).
71
+
72
+ Example:
73
+
74
+ ```python
75
+ >>> from transformers import GPTJModel, GPTJConfig
76
+
77
+ >>> # Initializing a GPT-J 6B configuration
78
+ >>> configuration = GPTJConfig()
79
+
80
+ >>> # Initializing a model from the configuration
81
+ >>> model = GPTJModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "gptj"
88
+ attribute_map = {
89
+ "max_position_embeddings": "n_positions",
90
+ "hidden_size": "n_embd",
91
+ "num_attention_heads": "n_head",
92
+ "num_hidden_layers": "n_layer",
93
+ }
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_size=50400,
98
+ n_positions=2048,
99
+ n_embd=4096,
100
+ n_layer=28,
101
+ n_head=16,
102
+ rotary_dim=64,
103
+ n_inner=None,
104
+ activation_function="gelu_new",
105
+ resid_pdrop=0.0,
106
+ embd_pdrop=0.0,
107
+ attn_pdrop=0.0,
108
+ layer_norm_epsilon=1e-5,
109
+ initializer_range=0.02,
110
+ use_cache=True,
111
+ bos_token_id=50256,
112
+ eos_token_id=50256,
113
+ tie_word_embeddings=False,
114
+ **kwargs,
115
+ ):
116
+ self.vocab_size = vocab_size
117
+ self.n_positions = n_positions
118
+ self.n_embd = n_embd
119
+ self.n_layer = n_layer
120
+ self.n_head = n_head
121
+ self.n_inner = n_inner
122
+ self.rotary_dim = rotary_dim
123
+ self.activation_function = activation_function
124
+ self.resid_pdrop = resid_pdrop
125
+ self.embd_pdrop = embd_pdrop
126
+ self.attn_pdrop = attn_pdrop
127
+ self.layer_norm_epsilon = layer_norm_epsilon
128
+ self.initializer_range = initializer_range
129
+ self.use_cache = use_cache
130
+
131
+ self.bos_token_id = bos_token_id
132
+ self.eos_token_id = eos_token_id
133
+
134
+ super().__init__(
135
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
136
+ )
137
+
138
+
139
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
140
+ class GPTJOnnxConfig(OnnxConfigWithPast):
141
+ def __init__(
142
+ self,
143
+ config: PretrainedConfig,
144
+ task: str = "default",
145
+ patching_specs: List[PatchingSpec] = None,
146
+ use_past: bool = False,
147
+ ):
148
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
149
+ if not getattr(self._config, "pad_token_id", None):
150
+ # TODO: how to do that better?
151
+ self._config.pad_token_id = 0
152
+
153
+ @property
154
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
155
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
156
+ if self.use_past:
157
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
158
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
159
+ else:
160
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
161
+
162
+ return common_inputs
163
+
164
+ @property
165
+ def num_layers(self) -> int:
166
+ return self._config.n_layer
167
+
168
+ @property
169
+ def num_attention_heads(self) -> int:
170
+ return self._config.n_head
171
+
172
+ def generate_dummy_inputs(
173
+ self,
174
+ tokenizer: PreTrainedTokenizer,
175
+ batch_size: int = -1,
176
+ seq_length: int = -1,
177
+ is_pair: bool = False,
178
+ framework: Optional[TensorType] = None,
179
+ ) -> Mapping[str, Any]:
180
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
181
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
182
+ )
183
+
184
+ # We need to order the input in the way they appears in the forward()
185
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
186
+
187
+ # Need to add the past_keys
188
+ if self.use_past:
189
+ if not is_torch_available():
190
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
191
+ else:
192
+ import torch
193
+
194
+ batch, seqlen = common_inputs["input_ids"].shape
195
+ # Not using the same length for past_key_values
196
+ past_key_values_length = seqlen + 2
197
+ past_shape = (
198
+ batch,
199
+ self.num_attention_heads,
200
+ past_key_values_length,
201
+ self._config.hidden_size // self.num_attention_heads,
202
+ )
203
+ ordered_inputs["past_key_values"] = [
204
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
205
+ ]
206
+
207
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
208
+ if self.use_past:
209
+ mask_dtype = ordered_inputs["attention_mask"].dtype
210
+ ordered_inputs["attention_mask"] = torch.cat(
211
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
212
+ )
213
+
214
+ return ordered_inputs
215
+
216
+ @property
217
+ def default_onnx_opset(self) -> int:
218
+ return 13
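+
+
+ # A hedged usage sketch (illustrative only, not part of the original module). The tiny config
+ # sizes and the tokenizer checkpoint below are assumptions chosen just to exercise GPTJOnnxConfig.
+ def _gptj_onnx_config_demo():
+     from transformers import AutoTokenizer, GPTJConfig
+     from transformers.utils import TensorType
+
+     config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)  # tiny config, illustration only
+     onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
+     print(onnx_config.inputs)  # dynamic axes for input_ids, past_key_values and attention_mask
+
+     # use_past=True makes generate_dummy_inputs build past_key_values, which requires PyTorch
+     tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gptj")
+     dummy = onnx_config.generate_dummy_inputs(
+         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
+     )
+     print({name: getattr(value, "shape", type(value)) for name, value in dummy.items()})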
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py ADDED
@@ -0,0 +1,718 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from functools import partial
17
+ from typing import Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen.attention import dot_product_attention_weights
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+ from jax import lax
28
+
29
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
30
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
31
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_gptj import GPTJConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CHECKPOINT_FOR_DOC = "gptj"
38
+ _CONFIG_FOR_DOC = "GPTJConfig"
39
+
40
+
41
+ GPTJ_START_DOCSTRING = r"""
42
+
43
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
44
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
45
+ etc.)
46
+
47
+ This model is also a Flax Linen
48
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
49
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
50
+
51
+ Finally, this model supports inherent JAX features such as:
52
+
53
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
54
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
55
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
56
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
57
+
58
+ Parameters:
59
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
60
+ Initializing with a config file does not load the weights associated with the model, only the
61
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
62
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
63
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
64
+ `jax.numpy.bfloat16` (on TPUs).
65
+
66
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
67
+ specified, all the computation will be performed with the given `dtype`.
68
+
69
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
70
+ parameters.**
71
+
72
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
73
+ [`~FlaxPreTrainedModel.to_bf16`].
74
+ """
75
+
76
+ GPTJ_INPUTS_DOCSTRING = r"""
77
+ Args:
78
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
79
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
80
+
81
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
82
+ [`PreTrainedTokenizer.__call__`] for details.
83
+
84
+ [What are input IDs?](../glossary#input-ids)
85
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
86
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
87
+
88
+ - 1 for tokens that are **not masked**,
89
+ - 0 for tokens that are **masked**.
90
+
91
+ [What are attention masks?](../glossary#attention-mask)
92
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
93
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
94
+ config.max_position_embeddings - 1]`.
95
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
96
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
97
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
98
+ output_attentions (`bool`, *optional*):
99
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
100
+ tensors for more detail.
101
+ output_hidden_states (`bool`, *optional*):
102
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
103
+ more detail.
104
+ return_dict (`bool`, *optional*):
105
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
106
+ """
107
+
108
+
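+ # A hedged input-preparation sketch matching the docstring above (the tokenizer checkpoint is an
+ # illustrative assumption): position_ids are derived from the attention mask, mirroring
+ # prepare_inputs_for_generation further below; padded positions are masked out anyway.
+ def _prepare_inputs_demo():
+     from transformers import AutoTokenizer
+
+     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+     tokenizer.pad_token = tokenizer.eos_token
+     encoded = tokenizer(["Hello world", "Hi"], padding=True, return_tensors="np")
+     position_ids = encoded["attention_mask"].cumsum(axis=-1) - 1
+     return encoded["input_ids"], encoded["attention_mask"], position_ids
+
+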
109
+ def create_sinusoidal_positions(num_pos, dim):
110
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
111
+ sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
112
+ sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
113
+
114
+ sentinel = dim // 2 + dim % 2
115
+ out = np.zeros((num_pos, dim))
116
+ out[:, 0:sentinel] = sin
117
+ out[:, sentinel:] = cos
118
+
119
+ return jnp.array(out)
120
+
121
+
122
+ def rotate_every_two(tensor):
123
+ rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
124
+ rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
125
+ return rotate_half_tensor
126
+
127
+
128
+ def apply_rotary_pos_emb(tensor, sincos):
129
+ sin_pos, cos_pos = sincos
130
+ sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
131
+ cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
132
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
133
+
134
+
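+ # A small shape sketch (illustrative only, not part of the original module): with the default
+ # GPT-J config, rotary_dim=64 and create_sinusoidal_positions(2048, 64) yields a (2048, 64)
+ # table whose first 32 columns are sin and last 32 are cos; apply_rotary_pos_emb then applies
+ # them pairwise to the first rotary_dim features of each head.
+ def _rotary_shape_demo():
+     positions = create_sinusoidal_positions(8, 4)  # (8, 4): columns [0:2] are sin, [2:4] are cos
+     sincos = jnp.split(jnp.take(positions, jnp.arange(8)[None, :], axis=0), 2, axis=-1)
+     query = jnp.ones((1, 8, 2, 4))  # (batch, seq_len, num_heads, rotary_dim)
+     return apply_rotary_pos_emb(query, sincos)  # same shape, rotated pairwise
+
+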
135
+ class FlaxGPTJAttention(nn.Module):
136
+ config: GPTJConfig
137
+ dtype: jnp.dtype = jnp.float32
138
+ causal: bool = True
139
+ is_cross_attention: bool = False
140
+
141
+ def setup(self):
142
+ config = self.config
143
+ self.embed_dim = config.hidden_size
144
+ self.num_heads = config.num_attention_heads
145
+ self.head_dim = self.embed_dim // self.num_heads
146
+
147
+ self.rotary_dim = config.rotary_dim
148
+
149
+ dense = partial(
150
+ nn.Dense,
151
+ self.embed_dim,
152
+ use_bias=False,
153
+ dtype=self.dtype,
154
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
155
+ )
156
+
157
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
158
+ self.out_proj = dense()
159
+
160
+ self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
161
+
162
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
163
+
164
+ pos_embd_dim = self.rotary_dim or self.embed_dim
165
+ self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim)
166
+
167
+ def _split_heads(self, hidden_states):
168
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
169
+
170
+ def _merge_heads(self, hidden_states):
171
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
172
+
173
+ @nn.compact
174
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
175
+ """
176
+ This function takes projected key, value states from a single input token and concatenates the states to cached
177
+ states from previous steps. This function is slightly adapted from the official Flax repository:
178
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
179
+ """
180
+ # detect if we're initializing by absence of existing cache data.
181
+ is_initialized = self.has_variable("cache", "cached_key")
182
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
183
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
184
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
185
+
186
+ if is_initialized:
187
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
188
+ # update key, value caches with our new 1d spatial slices
189
+ cur_index = cache_index.value
190
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
191
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
192
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
193
+ cached_key.value = key
194
+ cached_value.value = value
195
+ num_updated_cache_vectors = query.shape[1]
196
+ cache_index.value = cache_index.value + num_updated_cache_vectors
197
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key
198
+ # positions that have already been generated and cached, not the remaining zero elements.
199
+ pad_mask = jnp.broadcast_to(
200
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
201
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
202
+ )
203
+ attention_mask = combine_masks(pad_mask, attention_mask)
204
+ return key, value, attention_mask
205
+
206
+ def __call__(
207
+ self,
208
+ hidden_states,
209
+ attention_mask,
210
+ position_ids,
211
+ deterministic: bool = True,
212
+ init_cache: bool = False,
213
+ output_attentions: bool = False,
214
+ ):
215
+ query = self.q_proj(hidden_states)
216
+ key = self.k_proj(hidden_states)
217
+ value = self.v_proj(hidden_states)
218
+
219
+ query = self._split_heads(query)
220
+ key = self._split_heads(key)
221
+ value = self._split_heads(value)
222
+
223
+ sincos = jnp.take(self.embed_positions, position_ids, axis=0)
224
+ sincos = jnp.split(sincos, 2, axis=-1)
225
+ if self.rotary_dim is not None:
226
+ k_rot = key[:, :, :, : self.rotary_dim]
227
+ k_pass = key[:, :, :, self.rotary_dim :]
228
+
229
+ q_rot = query[:, :, :, : self.rotary_dim]
230
+ q_pass = query[:, :, :, self.rotary_dim :]
231
+
232
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
233
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
234
+
235
+ key = jnp.concatenate([k_rot, k_pass], axis=-1)
236
+ query = jnp.concatenate([q_rot, q_pass], axis=-1)
237
+ else:
238
+ key = apply_rotary_pos_emb(key, sincos)
239
+ query = apply_rotary_pos_emb(query, sincos)
240
+
241
+ query_length, key_length = query.shape[1], key.shape[1]
242
+
243
+ if self.has_variable("cache", "cached_key"):
244
+ mask_shift = self.variables["cache"]["cache_index"]
245
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
246
+ causal_mask = lax.dynamic_slice(
247
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
248
+ )
249
+ else:
250
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
251
+
252
+ batch_size = hidden_states.shape[0]
253
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
254
+
255
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
256
+ attention_mask = combine_masks(attention_mask, causal_mask)
257
+
258
+ dropout_rng = None
259
+ if not deterministic and self.config.attn_pdrop > 0.0:
260
+ dropout_rng = self.make_rng("dropout")
261
+
262
+ # During fast autoregressive decoding, we feed one position at a time,
263
+ # and cache the keys and values step by step.
264
+ if self.has_variable("cache", "cached_key") or init_cache:
265
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
266
+
267
+ # transform boolean mask into float mask
268
+ attention_bias = lax.select(
269
+ attention_mask > 0,
270
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
271
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
272
+ )
273
+
274
+ # usual dot product attention
275
+ attn_weights = dot_product_attention_weights(
276
+ query,
277
+ key,
278
+ bias=attention_bias,
279
+ dropout_rng=dropout_rng,
280
+ dropout_rate=self.config.attn_pdrop,
281
+ deterministic=deterministic,
282
+ dtype=self.dtype,
283
+ precision=None,
284
+ )
285
+
286
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
287
+ attn_output = self._merge_heads(attn_output)
288
+ attn_output = self.out_proj(attn_output)
289
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
290
+
291
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
292
+ return outputs
293
+
294
+
295
+ class FlaxGPTJMLP(nn.Module):
296
+ config: GPTJConfig
297
+ intermediate_size: int
298
+ dtype: jnp.dtype = jnp.float32
299
+
300
+ def setup(self):
301
+ embed_dim = self.config.hidden_size
302
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
303
+
304
+ self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init)
305
+ self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init)
306
+
307
+ self.act = ACT2FN[self.config.activation_function]
308
+ self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
309
+
310
+ def __call__(self, hidden_states, deterministic: bool = True):
311
+ hidden_states = self.fc_in(hidden_states)
312
+ hidden_states = self.act(hidden_states)
313
+ hidden_states = self.fc_out(hidden_states)
314
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
315
+ return hidden_states
316
+
317
+
318
+ class FlaxGPTJBlock(nn.Module):
319
+ config: GPTJConfig
320
+ dtype: jnp.dtype = jnp.float32
321
+
322
+ def setup(self):
323
+ hidden_size = self.config.hidden_size
324
+ inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
325
+
326
+ self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
327
+ self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype)
328
+
329
+ self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype)
330
+
331
+ def __call__(
332
+ self,
333
+ hidden_states,
334
+ attention_mask=None,
335
+ position_ids=None,
336
+ deterministic: bool = True,
337
+ init_cache: bool = False,
338
+ output_attentions: bool = False,
339
+ ):
340
+ residual = hidden_states
341
+ hidden_states = self.ln_1(hidden_states)
342
+ attn_outputs = self.attn(
343
+ hidden_states,
344
+ attention_mask=attention_mask,
345
+ position_ids=position_ids,
346
+ deterministic=deterministic,
347
+ init_cache=init_cache,
348
+ output_attentions=output_attentions,
349
+ )
350
+ attn_output = attn_outputs[0]
351
+
352
+ feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
353
+ # residual connection
354
+ hidden_states = attn_output + feed_forward_hidden_states + residual
355
+
356
+ return (hidden_states,) + attn_outputs[1:]
357
+
358
+
359
+ class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel):
360
+ """
361
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
362
+ models.
363
+ """
364
+
365
+ config_class = GPTJConfig
366
+ base_model_prefix = "transformer"
367
+ module_class: nn.Module = None
368
+
369
+ def __init__(
370
+ self,
371
+ config: GPTJConfig,
372
+ input_shape: Tuple = (1, 1),
373
+ seed: int = 0,
374
+ dtype: jnp.dtype = jnp.float32,
375
+ _do_init: bool = True,
376
+ **kwargs,
377
+ ):
378
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
379
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
380
+
381
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
382
+ # init input tensors
383
+ input_ids = jnp.zeros(input_shape, dtype="i4")
384
+ attention_mask = jnp.ones_like(input_ids)
385
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
386
+ params_rng, dropout_rng = jax.random.split(rng)
387
+ rngs = {"params": params_rng, "dropout": dropout_rng}
388
+
389
+ if self.config.add_cross_attention:
390
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
391
+ encoder_attention_mask = attention_mask
392
+ module_init_outputs = self.module.init(
393
+ rngs,
394
+ input_ids,
395
+ attention_mask,
396
+ position_ids,
397
+ encoder_hidden_states,
398
+ encoder_attention_mask,
399
+ return_dict=False,
400
+ )
401
+ else:
402
+ module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
403
+
404
+ random_params = module_init_outputs["params"]
405
+
406
+ if params is not None:
407
+ random_params = flatten_dict(unfreeze(random_params))
408
+ params = flatten_dict(unfreeze(params))
409
+ for missing_key in self._missing_keys:
410
+ params[missing_key] = random_params[missing_key]
411
+ self._missing_keys = set()
412
+ return freeze(unflatten_dict(params))
413
+ else:
414
+ return random_params
415
+
416
+ def init_cache(self, batch_size, max_length):
417
+ r"""
418
+ Args:
419
+ batch_size (`int`):
420
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
421
+ max_length (`int`):
422
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
423
+ cache.
424
+ """
425
+ # init input variables to retrieve cache
426
+ input_ids = jnp.ones((batch_size, max_length))
427
+ attention_mask = jnp.ones_like(input_ids)
428
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
429
+
430
+ init_variables = self.module.init(
431
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
432
+ )
433
+ return init_variables["cache"]
434
+
435
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
436
+ def __call__(
437
+ self,
438
+ input_ids,
439
+ attention_mask=None,
440
+ position_ids=None,
441
+ params: dict = None,
442
+ past_key_values: dict = None,
443
+ dropout_rng: jax.random.PRNGKey = None,
444
+ train: bool = False,
445
+ output_attentions: Optional[bool] = None,
446
+ output_hidden_states: Optional[bool] = None,
447
+ return_dict: Optional[bool] = None,
448
+ ):
449
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
450
+ output_hidden_states = (
451
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
452
+ )
453
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
454
+
455
+ batch_size, sequence_length = input_ids.shape
456
+
457
+ if position_ids is None:
458
+ if past_key_values is not None:
459
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
460
+
461
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
462
+
463
+ if attention_mask is None:
464
+ attention_mask = jnp.ones((batch_size, sequence_length))
465
+
466
+ # Handle any PRNG if needed
467
+ rngs = {}
468
+ if dropout_rng is not None:
469
+ rngs["dropout"] = dropout_rng
470
+
471
+ inputs = {"params": params or self.params}
472
+
473
+ # If past_key_values are passed, the cache is already initialized, and a private init_cache flag has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be updated by the FlaxGPTJAttention module.
474
+ if past_key_values:
475
+ inputs["cache"] = past_key_values
476
+ mutable = ["cache"]
477
+ else:
478
+ mutable = False
479
+
480
+ outputs = self.module.apply(
481
+ inputs,
482
+ jnp.array(input_ids, dtype="i4"),
483
+ jnp.array(attention_mask, dtype="i4"),
484
+ jnp.array(position_ids, dtype="i4"),
485
+ not train,
486
+ False,
487
+ output_attentions,
488
+ output_hidden_states,
489
+ return_dict,
490
+ rngs=rngs,
491
+ mutable=mutable,
492
+ )
493
+
494
+ # add updated cache to model output
495
+ if past_key_values is not None and return_dict:
496
+ outputs, past_key_values = outputs
497
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
498
+ return outputs
499
+ elif past_key_values is not None and not return_dict:
500
+ outputs, past_key_values = outputs
501
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
502
+
503
+ return outputs
504
+
505
+
506
+ class FlaxGPTJBlockCollection(nn.Module):
507
+ config: GPTJConfig
508
+ dtype: jnp.dtype = jnp.float32
509
+
510
+ def setup(self):
511
+ self.blocks = [
512
+ FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
513
+ ]
514
+
515
+ def __call__(
516
+ self,
517
+ hidden_states,
518
+ attention_mask=None,
519
+ position_ids=None,
520
+ deterministic: bool = True,
521
+ init_cache: bool = False,
522
+ output_attentions: bool = False,
523
+ output_hidden_states: bool = False,
524
+ return_dict: bool = True,
525
+ ):
526
+ all_attentions = () if output_attentions else None
527
+ all_hidden_states = () if output_hidden_states else None
528
+
529
+ for block in self.blocks:
530
+ if output_hidden_states:
531
+ all_hidden_states += (hidden_states,)
532
+
533
+ layer_outputs = block(
534
+ hidden_states,
535
+ attention_mask,
536
+ position_ids=position_ids,
537
+ deterministic=deterministic,
538
+ init_cache=init_cache,
539
+ output_attentions=output_attentions,
540
+ )
541
+ hidden_states = layer_outputs[0]
542
+
543
+ if output_attentions:
544
+ all_attentions += (layer_outputs[1],)
545
+
546
+ # this contains possible `None` values - `FlaxGPTJModule` will filter them out
547
+ outputs = (hidden_states, all_hidden_states, all_attentions)
548
+
549
+ return outputs
550
+
551
+
552
+ class FlaxGPTJModule(nn.Module):
553
+ config: GPTJConfig
554
+ dtype: jnp.dtype = jnp.float32
555
+
556
+ def setup(self):
557
+ self.embed_dim = self.config.hidden_size
558
+
559
+ self.wte = nn.Embed(
560
+ self.config.vocab_size,
561
+ self.config.hidden_size,
562
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
563
+ )
564
+ self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
565
+ self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype)
566
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
567
+
568
+ def __call__(
569
+ self,
570
+ input_ids,
571
+ attention_mask,
572
+ position_ids,
573
+ deterministic=True,
574
+ init_cache: bool = False,
575
+ output_attentions: bool = False,
576
+ output_hidden_states: bool = False,
577
+ return_dict: bool = True,
578
+ ):
579
+ input_embeds = self.wte(input_ids.astype("i4"))
580
+
581
+ hidden_states = self.dropout(input_embeds, deterministic=deterministic)
582
+
583
+ outputs = self.h(
584
+ hidden_states,
585
+ attention_mask,
586
+ position_ids=position_ids,
587
+ deterministic=deterministic,
588
+ init_cache=init_cache,
589
+ output_attentions=output_attentions,
590
+ output_hidden_states=output_hidden_states,
591
+ return_dict=return_dict,
592
+ )
593
+
594
+ hidden_states = outputs[0]
595
+ hidden_states = self.ln_f(hidden_states)
596
+
597
+ if output_hidden_states:
598
+ all_hidden_states = outputs[1] + (hidden_states,)
599
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
600
+ else:
601
+ outputs = (hidden_states,) + outputs[1:]
602
+
603
+ if not return_dict:
604
+ return tuple(v for v in outputs if v is not None)
605
+
606
+ return FlaxBaseModelOutput(
607
+ last_hidden_state=hidden_states,
608
+ hidden_states=outputs[1],
609
+ attentions=outputs[-1],
610
+ )
611
+
612
+
613
+ @add_start_docstrings(
614
+ "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.",
615
+ GPTJ_START_DOCSTRING,
616
+ )
617
+ class FlaxGPTJModel(FlaxGPTJPreTrainedModel):
618
+ module_class = FlaxGPTJModule
619
+
620
+
621
+ append_call_sample_docstring(
622
+ FlaxGPTJModel,
623
+ _CHECKPOINT_FOR_DOC,
624
+ FlaxCausalLMOutput,
625
+ _CONFIG_FOR_DOC,
626
+ )
627
+
628
+
629
+ class FlaxGPTJForCausalLMModule(nn.Module):
630
+ config: GPTJConfig
631
+ dtype: jnp.dtype = jnp.float32
632
+
633
+ def setup(self):
634
+ self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype)
635
+ self.lm_head = nn.Dense(
636
+ self.config.vocab_size,
637
+ dtype=self.dtype,
638
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
639
+ )
640
+
641
+ def __call__(
642
+ self,
643
+ input_ids,
644
+ attention_mask,
645
+ position_ids,
646
+ deterministic: bool = True,
647
+ init_cache: bool = False,
648
+ output_attentions: bool = False,
649
+ output_hidden_states: bool = False,
650
+ return_dict: bool = True,
651
+ ):
652
+ outputs = self.transformer(
653
+ input_ids,
654
+ attention_mask,
655
+ position_ids,
656
+ deterministic=deterministic,
657
+ init_cache=init_cache,
658
+ output_attentions=output_attentions,
659
+ output_hidden_states=output_hidden_states,
660
+ return_dict=return_dict,
661
+ )
662
+
663
+ hidden_states = outputs[0]
664
+
665
+ if self.config.tie_word_embeddings:
666
+ shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
667
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
668
+ else:
669
+ lm_logits = self.lm_head(hidden_states)
670
+
671
+ if not return_dict:
672
+ return (lm_logits,) + outputs[1:]
673
+
674
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
675
+
676
+
677
+ @add_start_docstrings(
678
+ """
679
+ The GPTJ Model transformer with a language modeling head on top.
680
+ """,
681
+ GPTJ_START_DOCSTRING,
682
+ )
683
+ class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel):
684
+ module_class = FlaxGPTJForCausalLMModule
685
+
686
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
687
+ # initializing the cache
688
+ batch_size, seq_length = input_ids.shape
689
+
690
+ past_key_values = self.init_cache(batch_size, max_length)
691
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
692
+ # But since GPTJ uses a causal mask, those positions are masked anyway.
693
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
694
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
695
+ if attention_mask is not None:
696
+ position_ids = attention_mask.cumsum(axis=-1) - 1
697
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
698
+ else:
699
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
700
+
701
+ return {
702
+ "past_key_values": past_key_values,
703
+ "attention_mask": extended_attention_mask,
704
+ "position_ids": position_ids,
705
+ }
706
+
707
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
708
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
709
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
710
+ return model_kwargs
711
+
712
+
713
+ append_call_sample_docstring(
714
+ FlaxGPTJForCausalLM,
715
+ _CHECKPOINT_FOR_DOC,
716
+ FlaxCausalLMOutput,
717
+ _CONFIG_FOR_DOC,
718
+ )
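+
+
+ # A hedged end-to-end sketch (checkpoint name, dtype and generation settings are illustrative
+ # assumptions, not taken from this module): `generate` relies on the `init_cache` and
+ # `prepare_inputs_for_generation` methods defined above for fast autoregressive decoding.
+ def _flax_gptj_generation_demo():
+     from transformers import AutoTokenizer
+
+     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+     model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", dtype=jnp.bfloat16)
+     inputs = tokenizer("Hello, my name is", return_tensors="np")
+     outputs = model.generate(
+         inputs["input_ids"], max_length=20, pad_token_id=tokenizer.eos_token_id
+     )
+     return tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)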
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py ADDED
@@ -0,0 +1,1427 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPT-J model."""
16
+
17
+ import warnings
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.fx
22
+ import torch.nn.functional as F
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPast,
30
+ CausalLMOutputWithPast,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutputWithPast,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_flash_attn_2_available,
40
+ is_flash_attn_greater_or_equal_2_10,
41
+ is_torch_fx_proxy,
42
+ logging,
43
+ )
44
+ from ...utils.model_parallel_utils import assert_device_map, get_device_map
45
+ from .configuration_gptj import GPTJConfig
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
56
+ _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
57
+ _CONFIG_FOR_DOC = "GPTJConfig"
58
+
59
+
60
+ from ..deprecated._archive_maps import GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
61
+
62
+
63
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
64
+ def _get_unpad_data(attention_mask):
65
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
66
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
67
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
68
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
69
+ return (
70
+ indices,
71
+ cu_seqlens,
72
+ max_seqlen_in_batch,
73
+ )
74
+
75
+
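+ # A worked example (illustrative only, not part of the original module) of what the unpadding
+ # helper returns for a batch whose two sequences have lengths 2 and 3.
+ def _get_unpad_data_demo():
+     mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
+     indices, cu_seqlens, max_len = _get_unpad_data(mask)
+     # indices == tensor([0, 1, 3, 4, 5]); cu_seqlens == tensor([0, 2, 5]); max_len == 3
+     return indices, cu_seqlens, max_len
+
+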
76
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
77
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
78
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
79
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
80
+
81
+
82
+ @torch.fx.wrap
83
+ def get_embed_positions(embed_positions, position_ids):
84
+ return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)
85
+
86
+
87
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
88
+ x1 = x[:, :, :, ::2]
89
+ x2 = x[:, :, :, 1::2]
90
+ x = torch.stack((-x2, x1), dim=-1)
91
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
92
+
93
+
94
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
95
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
96
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
97
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
98
+
99
+
100
+ class GPTJAttention(nn.Module):
101
+ def __init__(self, config):
102
+ super().__init__()
103
+ self.config = config
104
+ max_positions = config.max_position_embeddings
105
+ self.register_buffer(
106
+ "bias",
107
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
108
+ 1, 1, max_positions, max_positions
109
+ ),
110
+ persistent=False,
111
+ )
112
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
113
+
114
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
115
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
116
+
117
+ self.is_causal = True
118
+
119
+ self.embed_dim = config.hidden_size
120
+ self.num_attention_heads = config.num_attention_heads
121
+ self.head_dim = self.embed_dim // self.num_attention_heads
122
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
123
+ raise ValueError(
124
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
125
+ f" `num_attention_heads`: {self.num_attention_heads})."
126
+ )
127
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
128
+
129
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
130
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
131
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
132
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
133
+ self.rotary_dim = config.rotary_dim
134
+ pos_embd_dim = self.rotary_dim or self.embed_dim
135
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
136
+
137
+ def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
138
+ """
139
+ Splits hidden dim into attn_head_size and num_attention_heads
140
+ """
141
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
142
+ tensor = tensor.view(new_shape)
143
+ if rotary:
144
+ return tensor
145
+ if len(tensor.shape) == 5:
146
+ return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)
147
+ elif len(tensor.shape) == 4:
148
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
149
+ else:
150
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
151
+
152
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
153
+ """
154
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
155
+ """
156
+ if len(tensor.shape) == 5:
157
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
158
+ elif len(tensor.shape) == 4:
159
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
160
+ else:
161
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
162
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
163
+ return tensor.view(new_shape)
164
+
165
+ def _attn(
166
+ self,
167
+ query,
168
+ key,
169
+ value,
170
+ attention_mask=None,
171
+ head_mask=None,
172
+ ):
173
+ # compute causal mask from causal mask buffer
174
+ query_length, key_length = query.size(-2), key.size(-2)
175
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
176
+
177
+ # Keep the attention weights computation in fp32 to avoid overflow issues
178
+ query = query.to(torch.float32)
179
+ key = key.to(torch.float32)
180
+
181
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
182
+
183
+ mask_value = torch.finfo(attn_weights.dtype).min
184
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
185
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
186
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
187
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
188
+
189
+ attn_weights = attn_weights / self.scale_attn
190
+
191
+ if attention_mask is not None:
192
+ # Apply the attention mask
193
+ attn_weights = attn_weights + attention_mask
194
+
195
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
196
+ attn_weights = attn_weights.to(value.dtype)
197
+ attn_weights = self.attn_dropout(attn_weights)
198
+
199
+ # Mask heads if we want to
200
+ if head_mask is not None:
201
+ attn_weights = attn_weights * head_mask
202
+
203
+ attn_output = torch.matmul(attn_weights, value)
204
+
205
+ return attn_output, attn_weights
206
+
207
+ def _get_embed_positions(self, position_ids):
208
+ embed_positions = self.embed_positions
209
+ if embed_positions.device != position_ids.device:
210
+ embed_positions = embed_positions.to(position_ids.device)
211
+ self.embed_positions = embed_positions
212
+ return embed_positions.repeat(position_ids.shape[0], 1, 1)
213
+
214
+ def forward(
215
+ self,
216
+ hidden_states: torch.FloatTensor,
217
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
218
+ attention_mask: Optional[torch.FloatTensor] = None,
219
+ position_ids: Optional[torch.LongTensor] = None,
220
+ head_mask: Optional[torch.FloatTensor] = None,
221
+ use_cache: Optional[bool] = False,
222
+ output_attentions: Optional[bool] = False,
223
+ ) -> Union[
224
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
225
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
226
+ ]:
227
+ query = self.q_proj(hidden_states)
228
+ key = self.k_proj(hidden_states)
229
+ value = self.v_proj(hidden_states)
230
+
231
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
232
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
233
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
234
+
235
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
236
+ # The logic to conditionally copy to GPU could not be traced, so we do this
237
+ # every time in the torch.fx case
238
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
239
+ else:
240
+ embed_positions = self._get_embed_positions(position_ids)
241
+
242
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
243
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
244
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
245
+
246
+ if self.rotary_dim is not None:
247
+ k_rot = key[:, :, :, : self.rotary_dim]
248
+ k_pass = key[:, :, :, self.rotary_dim :]
249
+
250
+ q_rot = query[:, :, :, : self.rotary_dim]
251
+ q_pass = query[:, :, :, self.rotary_dim :]
252
+
253
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
254
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
255
+
256
+ key = torch.cat([k_rot, k_pass], dim=-1)
257
+ query = torch.cat([q_rot, q_pass], dim=-1)
258
+ else:
259
+ key = apply_rotary_pos_emb(key, sin, cos)
260
+ query = apply_rotary_pos_emb(query, sin, cos)
261
+
262
+ key = key.permute(0, 2, 1, 3)
263
+ query = query.permute(0, 2, 1, 3)
264
+
265
+ if layer_past is not None:
266
+ past_key = layer_past[0]
267
+ past_value = layer_past[1]
268
+ key = torch.cat((past_key, key), dim=-2)
269
+ value = torch.cat((past_value, value), dim=-2)
270
+
271
+ if use_cache is True:
272
+ # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation.
273
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
274
+ present = (key.to(hidden_states.dtype), value)
275
+ else:
276
+ present = None
277
+
278
+ # compute self-attention: V x Softmax(QK^T)
279
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
280
+
281
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
282
+ attn_output = self.out_proj(attn_output)
283
+ attn_output = self.resid_dropout(attn_output)
284
+
285
+ outputs = (attn_output, present)
286
+ if output_attentions:
287
+ outputs += (attn_weights,)
288
+
289
+ return outputs # a, present, (attentions)
290
+
291
+
292
+ class GPTJFlashAttention2(GPTJAttention):
293
+ """
294
+ GPTJ flash attention module. This module inherits from `GPTJAttention` as the weights of the module stay
295
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
296
+ flash attention and deal with padding tokens in case the input contains any of them.
297
+ """
298
+
299
+ def __init__(self, *args, **kwargs):
300
+ super().__init__(*args, **kwargs)
301
+
302
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
303
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
304
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
305
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.FloatTensor,
310
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
311
+ attention_mask: Optional[torch.FloatTensor] = None,
312
+ position_ids: Optional[torch.LongTensor] = None,
313
+ head_mask: Optional[torch.FloatTensor] = None,
314
+ use_cache: Optional[bool] = False,
315
+ output_attentions: Optional[bool] = False,
316
+ ) -> Union[
317
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
318
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
319
+ ]:
320
+ query = self.q_proj(hidden_states)
321
+ key = self.k_proj(hidden_states)
322
+ value = self.v_proj(hidden_states)
323
+
324
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
325
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
326
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
327
+
328
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
329
+ # The logic to conditionally copy to GPU could not be traced, so we do this
330
+ # every time in the torch.fx case
331
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
332
+ else:
333
+ embed_positions = self._get_embed_positions(position_ids)
334
+
335
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
336
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
337
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
338
+
339
+ if self.rotary_dim is not None:
340
+ k_rot = key[:, :, :, : self.rotary_dim]
341
+ k_pass = key[:, :, :, self.rotary_dim :]
342
+
343
+ q_rot = query[:, :, :, : self.rotary_dim]
344
+ q_pass = query[:, :, :, self.rotary_dim :]
345
+
346
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
347
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
348
+
349
+ key = torch.cat([k_rot, k_pass], dim=-1)
350
+ query = torch.cat([q_rot, q_pass], dim=-1)
351
+ else:
352
+ key = apply_rotary_pos_emb(key, sin, cos)
353
+ query = apply_rotary_pos_emb(query, sin, cos)
354
+
355
+ # transpose to have the desired shape
356
+ # before transpose: batch_size x seq_length x num_attention_heads x head_dim
357
+ # after transpose: batch_size x num_attention_heads x seq_length x head_dim
358
+ key = key.permute(0, 2, 1, 3)
359
+ query = query.permute(0, 2, 1, 3)
360
+ # value: batch_size x num_attention_heads x seq_length x head_dim
361
+
362
+ if layer_past is not None:
363
+ past_key = layer_past[0]
364
+ past_value = layer_past[1]
365
+ key = torch.cat((past_key, key), dim=-2)
366
+ value = torch.cat((past_value, value), dim=-2)
367
+
368
+ if use_cache is True:
369
+ # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation.
370
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
371
+ present = (key.to(hidden_states.dtype), value)
372
+ else:
373
+ present = None
374
+
375
+ # The Flash attention requires the input to have the shape
376
+ # batch_size x seq_length x num_attention_heads x head_dim
377
+ # therefore we need to keep the original shape for query and key, and reshape value
378
+ # to have the correct shape.
379
+ key = key.permute(0, 2, 1, 3).contiguous()
380
+ query = query.permute(0, 2, 1, 3).contiguous()
381
+ value = value.permute(0, 2, 1, 3).contiguous()
382
+
383
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
384
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
385
+ # cast them back in the correct dtype just to be sure everything works as expected.
386
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
387
+ # in fp32. (LlamaRMSNorm handles it correctly)
388
+
389
+ input_dtype = query.dtype
390
+ if input_dtype == torch.float32:
391
+ if torch.is_autocast_enabled():
392
+ target_dtype = torch.get_autocast_gpu_dtype()
393
+ # Handle the case where the model is quantized
394
+ elif hasattr(self.config, "_pre_quantization_dtype"):
395
+ target_dtype = self.config._pre_quantization_dtype
396
+ else:
397
+ target_dtype = self.q_proj.weight.dtype
398
+
399
+ logger.warning_once(
400
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
401
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
402
+ f" {target_dtype}."
403
+ )
404
+
405
+ query = query.to(target_dtype)
406
+ key = key.to(target_dtype)
407
+ value = value.to(target_dtype)
408
+
409
+ attention_dropout = self.config.attn_pdrop if self.training else 0.0 # attn_pdrop in gptj
410
+
411
+ query_length = query.shape[1]
412
+
413
+ # Compute attention
414
+ attn_weights = self._flash_attention_forward(
415
+ query,
416
+ key,
417
+ value,
418
+ attention_mask,
419
+ query_length,
420
+ dropout=attention_dropout,
421
+ )
422
+
423
+ # Reshape outputs
424
+ attn_output = attn_weights.reshape(
425
+ attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3]
426
+ )
427
+ attn_output = self.out_proj(attn_output)
428
+ attn_output = self.resid_dropout(attn_output)
429
+
430
+ outputs = (attn_output, present)
431
+ if output_attentions:
432
+ outputs += (attn_weights,)
433
+
434
+ return outputs
435
+
436
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
437
+ def _flash_attention_forward(
438
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
439
+ ):
440
+ """
441
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
443
+ first unpads the input, then computes the attention scores and pads the final attention scores.
443
+
444
+ Args:
445
+ query_states (`torch.Tensor`):
446
+ Input query states to be passed to Flash Attention API
447
+ key_states (`torch.Tensor`):
448
+ Input key states to be passed to Flash Attention API
449
+ value_states (`torch.Tensor`):
450
+ Input value states to be passed to Flash Attention API
451
+ attention_mask (`torch.Tensor`):
452
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
453
+ position of padding tokens and 1 for the position of non-padding tokens.
454
+ dropout (`float`):
455
+ Attention dropout
456
+ softmax_scale (`float`, *optional*):
457
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
458
+ """
459
+ if not self._flash_attn_uses_top_left_mask:
460
+ causal = self.is_causal
461
+ else:
462
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
463
+ causal = self.is_causal and query_length != 1
464
+
465
+ # Contains at least one padding token in the sequence
466
+ if attention_mask is not None:
467
+ batch_size = query_states.shape[0]
468
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
469
+ query_states, key_states, value_states, attention_mask, query_length
470
+ )
471
+
472
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
473
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
474
+
475
+ attn_output_unpad = flash_attn_varlen_func(
476
+ query_states,
477
+ key_states,
478
+ value_states,
479
+ cu_seqlens_q=cu_seqlens_q,
480
+ cu_seqlens_k=cu_seqlens_k,
481
+ max_seqlen_q=max_seqlen_in_batch_q,
482
+ max_seqlen_k=max_seqlen_in_batch_k,
483
+ dropout_p=dropout,
484
+ softmax_scale=softmax_scale,
485
+ causal=causal,
486
+ )
487
+
488
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
489
+ else:
490
+ attn_output = flash_attn_func(
491
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
492
+ )
493
+
494
+ return attn_output
495
+
496
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads
497
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
498
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
499
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
500
+
501
+ key_layer = index_first_axis(
502
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
503
+ )
504
+ value_layer = index_first_axis(
505
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
506
+ )
507
+ if query_length == kv_seq_len:
508
+ query_layer = index_first_axis(
509
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k
510
+ )
511
+ cu_seqlens_q = cu_seqlens_k
512
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
513
+ indices_q = indices_k
514
+ elif query_length == 1:
515
+ max_seqlen_in_batch_q = 1
516
+ cu_seqlens_q = torch.arange(
517
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
518
+ ) # There is a memcpy here, that is very bad.
519
+ indices_q = cu_seqlens_q[:-1]
520
+ query_layer = query_layer.squeeze(1)
521
+ else:
522
+ # The -q_len: slice assumes left padding.
523
+ attention_mask = attention_mask[:, -query_length:]
524
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
525
+
526
+ return (
527
+ query_layer,
528
+ key_layer,
529
+ value_layer,
530
+ indices_q,
531
+ (cu_seqlens_q, cu_seqlens_k),
532
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
533
+ )
534
+
535
+
536
+ GPTJ_ATTENTION_CLASSES = {
537
+ "eager": GPTJAttention,
538
+ "flash_attention_2": GPTJFlashAttention2,
539
+ }
540
+
541
+
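+ # A hedged selection sketch (checkpoint and dtype are illustrative assumptions): passing
+ # attn_implementation="flash_attention_2" to from_pretrained sets config._attn_implementation,
+ # so GPTJBlock below instantiates GPTJFlashAttention2 instead of the eager GPTJAttention.
+ def _flash_attention_selection_demo():
+     from transformers import GPTJForCausalLM
+
+     return GPTJForCausalLM.from_pretrained(
+         "EleutherAI/gpt-j-6B", torch_dtype=torch.float16, attn_implementation="flash_attention_2"
+     )
+
+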
542
+ class GPTJMLP(nn.Module):
543
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
544
+ super().__init__()
545
+ embed_dim = config.n_embd
546
+
547
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
548
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
549
+
550
+ self.act = ACT2FN[config.activation_function]
551
+ self.dropout = nn.Dropout(config.resid_pdrop)
552
+
553
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
554
+ hidden_states = self.fc_in(hidden_states)
555
+ hidden_states = self.act(hidden_states)
556
+ hidden_states = self.fc_out(hidden_states)
557
+ hidden_states = self.dropout(hidden_states)
558
+ return hidden_states
559
+
560
+
561
+ class GPTJBlock(nn.Module):
562
+ def __init__(self, config):
563
+ super().__init__()
564
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
565
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
566
+ self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config)
567
+ self.mlp = GPTJMLP(inner_dim, config)
568
+
569
+ def forward(
570
+ self,
571
+ hidden_states: Optional[torch.FloatTensor],
572
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
573
+ attention_mask: Optional[torch.FloatTensor] = None,
574
+ position_ids: Optional[torch.LongTensor] = None,
575
+ head_mask: Optional[torch.FloatTensor] = None,
576
+ use_cache: Optional[bool] = False,
577
+ output_attentions: Optional[bool] = False,
578
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
579
+ residual = hidden_states
580
+ hidden_states = self.ln_1(hidden_states)
581
+ attn_outputs = self.attn(
582
+ hidden_states=hidden_states,
583
+ layer_past=layer_past,
584
+ attention_mask=attention_mask,
585
+ position_ids=position_ids,
586
+ head_mask=head_mask,
587
+ use_cache=use_cache,
588
+ output_attentions=output_attentions,
589
+ )
590
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
591
+ outputs = attn_outputs[1:]
592
+
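+ # GPT-J runs attention and the MLP in parallel: both branches read the same layer-normed input
+ # and their outputs are summed together with the residual below.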
593
+ feed_forward_hidden_states = self.mlp(hidden_states)
594
+ hidden_states = attn_output + feed_forward_hidden_states + residual
595
+
596
+ if use_cache:
597
+ outputs = (hidden_states,) + outputs
598
+ else:
599
+ outputs = (hidden_states,) + outputs[1:]
600
+
601
+ return outputs # hidden_states, present, (attentions)
602
+
603
+
604
+ class GPTJPreTrainedModel(PreTrainedModel):
605
+ """
606
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
607
+ models.
608
+ """
609
+
610
+ config_class = GPTJConfig
611
+ base_model_prefix = "transformer"
612
+ is_parallelizable = True
613
+ supports_gradient_checkpointing = True
614
+ _no_split_modules = ["GPTJBlock"]
615
+ _skip_keys_device_placement = "past_key_values"
616
+ _supports_flash_attn_2 = True
617
+
618
+ def __init__(self, *inputs, **kwargs):
619
+ super().__init__(*inputs, **kwargs)
620
+
621
+ def _init_weights(self, module):
622
+ """Initialize the weights."""
623
+ if isinstance(module, (nn.Linear,)):
624
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
625
+ # cf https://github.com/pytorch/pytorch/pull/5617
626
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
627
+ if module.bias is not None:
628
+ module.bias.data.zero_()
629
+ elif isinstance(module, nn.Embedding):
630
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
631
+ if module.padding_idx is not None:
632
+ module.weight.data[module.padding_idx].zero_()
633
+ elif isinstance(module, nn.LayerNorm):
634
+ module.bias.data.zero_()
635
+ module.weight.data.fill_(1.0)
636
+
637
+
638
+ GPTJ_START_DOCSTRING = r"""
639
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
640
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
641
+ behavior.
642
+
643
+ Parameters:
644
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
645
+ Initializing with a config file does not load the weights associated with the model, only the
646
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
647
+ """
648
+
649
+ GPTJ_INPUTS_DOCSTRING = r"""
650
+ Args:
651
+ input_ids (`torch.LongTensor` of shape `({0})`):
652
+ Indices of input sequence tokens in the vocabulary.
653
+
654
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
655
+ [`PreTrainedTokenizer.__call__`] for details.
656
+
657
+ [What are input IDs?](../glossary#input-ids)
658
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
659
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
660
+
661
+ - 1 for tokens that are **not masked**,
662
+ - 0 for tokens that are **masked**.
663
+
664
+ [What are attention masks?](../glossary#attention-mask)
665
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
666
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
667
+ 1]`:
668
+
669
+ - 0 corresponds to a *sentence A* token,
670
+ - 1 corresponds to a *sentence B* token.
671
+
672
+ [What are token type IDs?](../glossary#token-type-ids)
673
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
674
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
675
+ config.n_positions - 1]`.
676
+
677
+ [What are position IDs?](../glossary#position-ids)
678
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
679
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
680
+
681
+ - 1 indicates the head is **not masked**,
682
+ - 0 indicates the head is **masked**.
683
+
684
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
685
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
686
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
687
+ model's internal embedding lookup matrix.
688
+ output_attentions (`bool`, *optional*):
689
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
690
+ tensors for more detail.
691
+ output_hidden_states (`bool`, *optional*):
692
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
693
+ more detail.
694
+ return_dict (`bool`, *optional*):
695
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
696
+ """
697
+
698
+ PARALLELIZE_DOCSTRING = r"""
699
+ This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
700
+ attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
701
+ across all devices.
702
+
703
+ Args:
704
+ device_map (`Dict[int, list]`, optional, defaults to None):
705
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
706
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
707
+ have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
708
+ following number of attention modules:
709
+
710
+ - gpt-j-6B: 28
711
+
712
+ Example:
713
+
714
+ ```python
715
+ # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
716
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
717
+ device_map = {
718
+ 0: [0, 1, 2, 3, 4, 5, 6],
719
+ 1: [7, 8, 9, 10, 11, 12, 13],
720
+ 2: [14, 15, 16, 17, 18, 19, 20],
721
+ 3: [21, 22, 23, 24, 25, 26, 27],
722
+ }
723
+ model.parallelize(device_map)
724
+ ```
725
+ """
726
+
727
+ DEPARALLELIZE_DOCSTRING = r"""
728
+ Moves the model to CPU from a model parallel state.
729
+
730
+ Example:
731
+
732
+ ```python
733
+ # On a 4 GPU machine with gpt-j-6B:
734
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
735
+ device_map = {
736
+ 0: [0, 1, 2, 3, 4, 5, 6],
737
+ 1: [7, 8, 9, 10, 11, 12, 13],
738
+ 2: [14, 15, 16, 17, 18, 19, 20],
739
+ 3: [21, 22, 23, 24, 25, 26, 27],
740
+ }
741
+ model.parallelize(device_map) # Splits the model across several devices
742
+ model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
743
+ ```
744
+ """
745
+
746
+
747
+ @add_start_docstrings(
748
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
749
+ GPTJ_START_DOCSTRING,
750
+ )
751
+ class GPTJModel(GPTJPreTrainedModel):
752
+ def __init__(self, config):
753
+ super().__init__(config)
754
+
755
+ self.embed_dim = config.n_embd
756
+ self.vocab_size = config.vocab_size
757
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
758
+ self.drop = nn.Dropout(config.embd_pdrop)
759
+ self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
760
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
761
+
762
+ # Model parallel
763
+ self.model_parallel = False
764
+ self.device_map = None
765
+ self.gradient_checkpointing = False
766
+
767
+ # Initialize weights and apply final processing
768
+ self.post_init()
769
+
770
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
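+ # With flash attention 2, padding is handled inside the attention layer from the 2D mask, so `forward` skips building the additive 4D mask.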
771
+
772
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
773
+ def parallelize(self, device_map=None):
774
+ warnings.warn(
775
+ "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
776
+ " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
777
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
778
+ " ...}",
779
+ FutureWarning,
780
+ )
781
+ # Check validity of device_map
782
+ self.device_map = (
783
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
784
+ )
785
+ assert_device_map(self.device_map, len(self.h))
786
+ self.model_parallel = True
787
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
788
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
789
+ self.wte = self.wte.to(self.first_device)
790
+ # Load onto devices
791
+ for k, v in self.device_map.items():
792
+ for block in v:
793
+ cuda_device = "cuda:" + str(k)
794
+ self.h[block] = self.h[block].to(cuda_device)
795
+ # ln_f to last
796
+ self.ln_f = self.ln_f.to(self.last_device)
797
+
798
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
799
+ def deparallelize(self):
800
+ warnings.warn(
801
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
802
+ FutureWarning,
803
+ )
804
+ self.model_parallel = False
805
+ self.device_map = None
806
+ self.first_device = "cpu"
807
+ self.last_device = "cpu"
808
+ self.wte = self.wte.to("cpu")
809
+ for index in range(len(self.h)):
810
+ self.h[index] = self.h[index].to("cpu")
811
+ self.ln_f = self.ln_f.to("cpu")
812
+ torch.cuda.empty_cache()
813
+
814
+ def get_input_embeddings(self):
815
+ return self.wte
816
+
817
+ def set_input_embeddings(self, new_embeddings):
818
+ self.wte = new_embeddings
819
+
820
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
821
+ @add_code_sample_docstrings(
822
+ checkpoint=_CHECKPOINT_FOR_DOC,
823
+ output_type=BaseModelOutputWithPast,
824
+ config_class=_CONFIG_FOR_DOC,
825
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
826
+ )
827
+ def forward(
828
+ self,
829
+ input_ids: Optional[torch.LongTensor] = None,
830
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
831
+ attention_mask: Optional[torch.FloatTensor] = None,
832
+ token_type_ids: Optional[torch.LongTensor] = None,
833
+ position_ids: Optional[torch.LongTensor] = None,
834
+ head_mask: Optional[torch.FloatTensor] = None,
835
+ inputs_embeds: Optional[torch.FloatTensor] = None,
836
+ use_cache: Optional[bool] = None,
837
+ output_attentions: Optional[bool] = None,
838
+ output_hidden_states: Optional[bool] = None,
839
+ return_dict: Optional[bool] = None,
840
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (
843
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
844
+ )
845
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
846
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
847
+
848
+ if input_ids is not None and inputs_embeds is not None:
849
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
850
+ elif input_ids is not None:
851
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
852
+ input_shape = input_ids.size()
853
+ input_ids = input_ids.view(-1, input_shape[-1])
854
+ batch_size = input_ids.shape[0]
855
+ elif inputs_embeds is not None:
856
+ input_shape = inputs_embeds.size()[:-1]
857
+ batch_size = inputs_embeds.shape[0]
858
+ else:
859
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
860
+
861
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
862
+
863
+ if token_type_ids is not None:
864
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
865
+
866
+ if past_key_values is None:
867
+ past_length = 0
868
+ past_key_values = tuple([None] * len(self.h))
869
+ else:
870
+ past_length = past_key_values[0][0].size(-2)
871
+
872
+ if position_ids is None:
873
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
874
+ position_ids = position_ids.unsqueeze(0)
875
+
876
+ if not self._use_flash_attention_2:
877
+ # Attention mask.
878
+ if attention_mask is not None:
879
+ if batch_size <= 0:
880
+ raise ValueError("batch_size has to be defined and > 0")
881
+ attention_mask = attention_mask.view(batch_size, -1)
882
+ # We create a 3D attention mask from a 2D tensor mask.
883
+ # Sizes are [batch_size, 1, 1, to_seq_length]
884
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
885
+ # this attention mask is simpler than the triangular masking of causal attention
886
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
887
+ attention_mask = attention_mask[:, None, None, :]
888
+
889
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
890
+ # masked positions, this operation will create a tensor which is 0.0 for
891
+ # positions we want to attend and the dtype's smallest value for masked positions.
892
+ # Since we are adding it to the raw scores before the softmax, this is
893
+ # effectively the same as removing these entirely.
894
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
895
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
896
+
897
+ # Prepare head mask if needed
898
+ # 1.0 in head_mask indicate we keep the head
899
+ # attention_probs has shape bsz x num_attention_heads x N x N
900
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
901
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
902
+
903
+ if inputs_embeds is None:
904
+ inputs_embeds = self.wte(input_ids)
905
+
906
+ hidden_states = inputs_embeds
907
+
908
+ if token_type_ids is not None:
909
+ token_type_embeds = self.wte(token_type_ids)
910
+ hidden_states = hidden_states + token_type_embeds
911
+
912
+ hidden_states = self.drop(hidden_states)
913
+
914
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
915
+
916
+ if self.gradient_checkpointing and self.training:
917
+ if use_cache:
918
+ logger.warning_once(
919
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
920
+ )
921
+ use_cache = False
922
+
923
+ presents = () if use_cache else None
924
+ all_self_attentions = () if output_attentions else None
925
+ all_hidden_states = () if output_hidden_states else None
926
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
927
+ # Model parallel
928
+ if self.model_parallel:
929
+ torch.cuda.set_device(hidden_states.device)
930
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
931
+ if layer_past is not None:
932
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
933
+ # Ensure that attention_mask is always on the same device as hidden_states
934
+ if attention_mask is not None:
935
+ attention_mask = attention_mask.to(hidden_states.device)
936
+ if isinstance(head_mask, torch.Tensor):
937
+ head_mask = head_mask.to(hidden_states.device)
938
+ if output_hidden_states:
939
+ all_hidden_states = all_hidden_states + (hidden_states,)
940
+
941
+ if self.gradient_checkpointing and self.training:
942
+ outputs = self._gradient_checkpointing_func(
943
+ block.__call__,
944
+ hidden_states,
945
+ None,
946
+ attention_mask,
947
+ position_ids,
948
+ head_mask[i],
949
+ use_cache,
950
+ output_attentions,
951
+ )
952
+ else:
953
+ outputs = block(
954
+ hidden_states=hidden_states,
955
+ layer_past=layer_past,
956
+ attention_mask=attention_mask,
957
+ position_ids=position_ids,
958
+ head_mask=head_mask[i],
959
+ use_cache=use_cache,
960
+ output_attentions=output_attentions,
961
+ )
962
+
963
+ hidden_states = outputs[0]
964
+ if use_cache is True:
965
+ presents = presents + (outputs[1],)
966
+
967
+ if output_attentions:
968
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
969
+
970
+ # Model Parallel: If it's the last layer for that device, put things on the next device
971
+ if self.model_parallel:
972
+ for k, v in self.device_map.items():
973
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
974
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
975
+
976
+ hidden_states = self.ln_f(hidden_states)
977
+
978
+ hidden_states = hidden_states.view(output_shape)
979
+ # Add last hidden state
980
+ if output_hidden_states:
981
+ all_hidden_states = all_hidden_states + (hidden_states,)
982
+
983
+ if not return_dict:
984
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
985
+
986
+ return BaseModelOutputWithPast(
987
+ last_hidden_state=hidden_states,
988
+ past_key_values=presents,
989
+ hidden_states=all_hidden_states,
990
+ attentions=all_self_attentions,
991
+ )
992
+
993
+
994
+ @add_start_docstrings(
995
+ """
996
+ The GPT-J Model transformer with a language modeling head on top.
997
+ """,
998
+ GPTJ_START_DOCSTRING,
999
+ )
1000
+ class GPTJForCausalLM(GPTJPreTrainedModel):
1001
+ _tied_weights_keys = ["lm_head.weight"]
1002
+
1003
+ def __init__(self, config):
1004
+ super().__init__(config)
1005
+ self.transformer = GPTJModel(config)
1006
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
1007
+
1008
+ # Model parallel
1009
+ self.model_parallel = False
1010
+ self.device_map = None
1011
+
1012
+ # Initialize weights and apply final processing
1013
+ self.post_init()
1014
+
1015
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1016
+ def parallelize(self, device_map=None):
1017
+ warnings.warn(
1018
+ "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
1019
+ " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
1020
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
1021
+ " 0, 'transformer.h.1': 1, ...}",
1022
+ FutureWarning,
1023
+ )
1024
+ self.device_map = (
1025
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1026
+ if device_map is None
1027
+ else device_map
1028
+ )
1029
+ assert_device_map(self.device_map, len(self.transformer.h))
1030
+ self.transformer.parallelize(self.device_map)
1031
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1032
+ self.model_parallel = True
1033
+
1034
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1035
+ def deparallelize(self):
1036
+ warnings.warn(
1037
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
1038
+ FutureWarning,
1039
+ )
1040
+ self.transformer.deparallelize()
1041
+ self.transformer = self.transformer.to("cpu")
1042
+ self.lm_head = self.lm_head.to("cpu")
1043
+ self.model_parallel = False
1044
+ torch.cuda.empty_cache()
1045
+
1046
+ def get_output_embeddings(self):
1047
+ return self.lm_head
1048
+
1049
+ def set_output_embeddings(self, new_embeddings):
1050
+ self.lm_head = new_embeddings
1051
+
1052
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
1053
+ token_type_ids = kwargs.get("token_type_ids", None)
1054
+ # Omit tokens covered by past_key_values
1055
+ if past_key_values:
1056
+ past_length = past_key_values[0][0].shape[2]
1057
+
1058
+ # Some generation methods already pass only the last input ID
1059
+ if input_ids.shape[1] > past_length:
1060
+ remove_prefix_length = past_length
1061
+ else:
1062
+ # Default to old behavior: keep only final ID
1063
+ remove_prefix_length = input_ids.shape[1] - 1
1064
+
1065
+ input_ids = input_ids[:, remove_prefix_length:]
1066
+ if token_type_ids is not None:
1067
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
1068
+
1069
+ attention_mask = kwargs.get("attention_mask", None)
1070
+ position_ids = kwargs.get("position_ids", None)
1071
+
1072
+ if attention_mask is not None and position_ids is None:
1073
+ # create position_ids on the fly for batch generation
1074
+ position_ids = attention_mask.long().cumsum(-1) - 1
1075
+ position_ids.masked_fill_(attention_mask == 0, 1)
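+ # padded positions get a dummy position id of 1; they are ignored thanks to the attention mask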
1076
+ if past_key_values:
1077
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1078
+
1079
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1080
+ if inputs_embeds is not None and past_key_values is None:
1081
+ model_inputs = {"inputs_embeds": inputs_embeds}
1082
+ else:
1083
+ model_inputs = {"input_ids": input_ids}
1084
+
1085
+ model_inputs.update(
1086
+ {
1087
+ "past_key_values": past_key_values,
1088
+ "use_cache": kwargs.get("use_cache"),
1089
+ "position_ids": position_ids,
1090
+ "attention_mask": attention_mask,
1091
+ "token_type_ids": token_type_ids,
1092
+ }
1093
+ )
1094
+
1095
+ return model_inputs
1096
+
1097
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1098
+ @add_code_sample_docstrings(
1099
+ checkpoint=_CHECKPOINT_FOR_DOC,
1100
+ output_type=CausalLMOutputWithPast,
1101
+ config_class=_CONFIG_FOR_DOC,
1102
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1103
+ )
1104
+ def forward(
1105
+ self,
1106
+ input_ids: Optional[torch.LongTensor] = None,
1107
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1108
+ attention_mask: Optional[torch.FloatTensor] = None,
1109
+ token_type_ids: Optional[torch.LongTensor] = None,
1110
+ position_ids: Optional[torch.LongTensor] = None,
1111
+ head_mask: Optional[torch.FloatTensor] = None,
1112
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1113
+ labels: Optional[torch.LongTensor] = None,
1114
+ use_cache: Optional[bool] = None,
1115
+ output_attentions: Optional[bool] = None,
1116
+ output_hidden_states: Optional[bool] = None,
1117
+ return_dict: Optional[bool] = None,
1118
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1119
+ r"""
1120
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1121
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1122
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
1123
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
1124
+ """
1125
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1126
+
1127
+ transformer_outputs = self.transformer(
1128
+ input_ids,
1129
+ past_key_values=past_key_values,
1130
+ attention_mask=attention_mask,
1131
+ token_type_ids=token_type_ids,
1132
+ position_ids=position_ids,
1133
+ head_mask=head_mask,
1134
+ inputs_embeds=inputs_embeds,
1135
+ use_cache=use_cache,
1136
+ output_attentions=output_attentions,
1137
+ output_hidden_states=output_hidden_states,
1138
+ return_dict=return_dict,
1139
+ )
1140
+ hidden_states = transformer_outputs[0]
1141
+
1142
+ # Set device for model parallelism
1143
+ if self.model_parallel:
1144
+ torch.cuda.set_device(self.transformer.first_device)
1145
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1146
+
1147
+ # make sure sampling in fp16 works correctly and
1148
+ # compute loss in fp32 to match with mesh-tf version
1149
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
1150
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
1151
+
1152
+ loss = None
1153
+ if labels is not None:
1154
+ # move labels to correct device to enable model parallelism
1155
+ labels = labels.to(lm_logits.device)
1156
+ # Shift so that tokens < n predict n
1157
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1158
+ shift_labels = labels[..., 1:].contiguous()
1159
+ # Flatten the tokens
1160
+ loss_fct = CrossEntropyLoss()
1161
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1162
+
1163
+ loss = loss.to(hidden_states.dtype)
1164
+
1165
+ if not return_dict:
1166
+ output = (lm_logits,) + transformer_outputs[1:]
1167
+ return ((loss,) + output) if loss is not None else output
1168
+
1169
+ return CausalLMOutputWithPast(
1170
+ loss=loss,
1171
+ logits=lm_logits,
1172
+ past_key_values=transformer_outputs.past_key_values,
1173
+ hidden_states=transformer_outputs.hidden_states,
1174
+ attentions=transformer_outputs.attentions,
1175
+ )
1176
+
1177
+ @staticmethod
1178
+ def _reorder_cache(
1179
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1180
+ ) -> Tuple[Tuple[torch.Tensor]]:
1181
+ """
1182
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
1183
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1184
+ beam_idx at every generation step.
1185
+ """
1186
+ return tuple(
1187
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1188
+ for layer_past in past_key_values
1189
+ )
1190
+
1191
+
1192
+ @add_start_docstrings(
1193
+ """
1194
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
1195
+
1196
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1197
+ (e.g. GPT, GPT-2, GPT-Neo) do.
1198
+
1199
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1200
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1201
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1202
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1203
+ each row of the batch).
1204
+ """,
1205
+ GPTJ_START_DOCSTRING,
1206
+ )
1207
+ class GPTJForSequenceClassification(GPTJPreTrainedModel):
1208
+ def __init__(self, config):
1209
+ super().__init__(config)
1210
+ self.num_labels = config.num_labels
1211
+ self.transformer = GPTJModel(config)
1212
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1213
+
1214
+ # Model parallel
1215
+ self.model_parallel = False
1216
+ self.device_map = None
1217
+
1218
+ # Initialize weights and apply final processing
1219
+ self.post_init()
1220
+
1221
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1222
+ @add_code_sample_docstrings(
1223
+ checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
1224
+ output_type=SequenceClassifierOutputWithPast,
1225
+ config_class=_CONFIG_FOR_DOC,
1226
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1227
+ )
1228
+ def forward(
1229
+ self,
1230
+ input_ids: Optional[torch.LongTensor] = None,
1231
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1232
+ attention_mask: Optional[torch.FloatTensor] = None,
1233
+ token_type_ids: Optional[torch.LongTensor] = None,
1234
+ position_ids: Optional[torch.LongTensor] = None,
1235
+ head_mask: Optional[torch.FloatTensor] = None,
1236
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1237
+ labels: Optional[torch.LongTensor] = None,
1238
+ use_cache: Optional[bool] = None,
1239
+ output_attentions: Optional[bool] = None,
1240
+ output_hidden_states: Optional[bool] = None,
1241
+ return_dict: Optional[bool] = None,
1242
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1243
+ r"""
1244
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1245
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1246
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1247
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1248
+ """
1249
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1250
+
1251
+ transformer_outputs = self.transformer(
1252
+ input_ids,
1253
+ past_key_values=past_key_values,
1254
+ attention_mask=attention_mask,
1255
+ token_type_ids=token_type_ids,
1256
+ position_ids=position_ids,
1257
+ head_mask=head_mask,
1258
+ inputs_embeds=inputs_embeds,
1259
+ use_cache=use_cache,
1260
+ output_attentions=output_attentions,
1261
+ output_hidden_states=output_hidden_states,
1262
+ return_dict=return_dict,
1263
+ )
1264
+ hidden_states = transformer_outputs[0]
1265
+ logits = self.score(hidden_states)
1266
+
1267
+ if input_ids is not None:
1268
+ batch_size = input_ids.shape[0]
1269
+ else:
1270
+ batch_size = inputs_embeds.shape[0]
1271
+
1272
+ if self.config.pad_token_id is None and batch_size != 1:
1273
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1274
+ if self.config.pad_token_id is None:
1275
+ sequence_lengths = -1
1276
+ else:
1277
+ if input_ids is not None:
1278
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1279
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1280
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
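+ # `argmax` finds the first padding position; subtracting 1 and taking the modulo yields the index of the last non-padding token (or the final position when a row has no padding)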
1281
+ sequence_lengths = sequence_lengths.to(logits.device)
1282
+ else:
1283
+ sequence_lengths = -1
1284
+ logger.warning(
1285
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1286
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1287
+ )
1288
+
1289
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1290
+
1291
+ loss = None
1292
+ if labels is not None:
1293
+ labels = labels.to(pooled_logits.device)
1294
+ if self.config.problem_type is None:
1295
+ if self.num_labels == 1:
1296
+ self.config.problem_type = "regression"
1297
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1298
+ self.config.problem_type = "single_label_classification"
1299
+ else:
1300
+ self.config.problem_type = "multi_label_classification"
1301
+
1302
+ if self.config.problem_type == "regression":
1303
+ loss_fct = MSELoss()
1304
+ if self.num_labels == 1:
1305
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1306
+ else:
1307
+ loss = loss_fct(pooled_logits, labels)
1308
+ elif self.config.problem_type == "single_label_classification":
1309
+ loss_fct = CrossEntropyLoss()
1310
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1311
+ elif self.config.problem_type == "multi_label_classification":
1312
+ loss_fct = BCEWithLogitsLoss()
1313
+ loss = loss_fct(pooled_logits, labels)
1314
+ if not return_dict:
1315
+ output = (pooled_logits,) + transformer_outputs[1:]
1316
+ return ((loss,) + output) if loss is not None else output
1317
+
1318
+ return SequenceClassifierOutputWithPast(
1319
+ loss=loss,
1320
+ logits=pooled_logits,
1321
+ past_key_values=transformer_outputs.past_key_values,
1322
+ hidden_states=transformer_outputs.hidden_states,
1323
+ attentions=transformer_outputs.attentions,
1324
+ )
1325
+
1326
+
1327
+ @add_start_docstrings(
1328
+ """
1329
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
1330
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1331
+ """,
1332
+ GPTJ_START_DOCSTRING,
1333
+ )
1334
+ class GPTJForQuestionAnswering(GPTJPreTrainedModel):
1335
+ def __init__(self, config):
1336
+ super().__init__(config)
1337
+ self.num_labels = config.num_labels
1338
+ self.transformer = GPTJModel(config)
1339
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1340
+
1341
+ # Model parallel
1342
+ self.model_parallel = False
1343
+ self.device_map = None
1344
+
1345
+ # Initialize weights and apply final processing
1346
+ self.post_init()
1347
+
1348
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1349
+ @add_code_sample_docstrings(
1350
+ checkpoint=_CHECKPOINT_FOR_DOC,
1351
+ output_type=QuestionAnsweringModelOutput,
1352
+ config_class=_CONFIG_FOR_DOC,
1353
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1354
+ )
1355
+ def forward(
1356
+ self,
1357
+ input_ids: Optional[torch.LongTensor] = None,
1358
+ attention_mask: Optional[torch.FloatTensor] = None,
1359
+ token_type_ids: Optional[torch.LongTensor] = None,
1360
+ position_ids: Optional[torch.LongTensor] = None,
1361
+ head_mask: Optional[torch.FloatTensor] = None,
1362
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1363
+ start_positions: Optional[torch.LongTensor] = None,
1364
+ end_positions: Optional[torch.LongTensor] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ return_dict: Optional[bool] = None,
1368
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1369
+ r"""
1370
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1371
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1372
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1373
+ are not taken into account for computing the loss.
1374
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1377
+ are not taken into account for computing the loss.
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ outputs = self.transformer(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ token_type_ids=token_type_ids,
1385
+ position_ids=position_ids,
1386
+ head_mask=head_mask,
1387
+ inputs_embeds=inputs_embeds,
1388
+ output_attentions=output_attentions,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+
1393
+ sequence_output = outputs[0]
1394
+
1395
+ logits = self.qa_outputs(sequence_output)
1396
+ start_logits, end_logits = logits.split(1, dim=-1)
1397
+ start_logits = start_logits.squeeze(-1).contiguous()
1398
+ end_logits = end_logits.squeeze(-1).contiguous()
1399
+
1400
+ total_loss = None
1401
+ if start_positions is not None and end_positions is not None:
1402
+ # If we are on multi-GPU, splitting can add a dimension; squeeze it away
1403
+ if len(start_positions.size()) > 1:
1404
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1405
+ if len(end_positions.size()) > 1:
1406
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1407
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1408
+ ignored_index = start_logits.size(1)
1409
+ start_positions = start_positions.clamp(0, ignored_index)
1410
+ end_positions = end_positions.clamp(0, ignored_index)
1411
+
1412
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1413
+ start_loss = loss_fct(start_logits, start_positions)
1414
+ end_loss = loss_fct(end_logits, end_positions)
1415
+ total_loss = (start_loss + end_loss) / 2
1416
+
1417
+ if not return_dict:
1418
+ output = (start_logits, end_logits) + outputs[2:]
1419
+ return ((total_loss,) + output) if total_loss is not None else output
1420
+
1421
+ return QuestionAnsweringModelOutput(
1422
+ loss=total_loss,
1423
+ start_logits=start_logits,
1424
+ end_logits=end_logits,
1425
+ hidden_states=outputs.hidden_states,
1426
+ attentions=outputs.attentions,
1427
+ )
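
Taken together, the classes above follow the standard `transformers` usage pattern. Below is a minimal, hedged sketch of loading the PyTorch GPT-J model for text generation; the checkpoint name comes from the docstrings in this file, while the dtype, device handling, and generation settings are illustrative assumptions rather than values mandated by the code.

```python
import torch
from transformers import AutoTokenizer, GPTJForCausalLM

# "EleutherAI/gpt-j-6B" is the checkpoint referenced in the docstrings above.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = GPTJForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    torch_dtype=torch.float16,  # illustrative: halves memory when running on GPU
    # attn_implementation="flash_attention_2",  # optional; requires the flash-attn package
)
model.to("cuda" if torch.cuda.is_available() else "cpu")

inputs = tokenizer("GPT-J is a", return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

The same loading pattern applies to the other heads defined here (`GPTJForSequenceClassification`, `GPTJForQuestionAnswering`); only the head-specific inputs and outputs change.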
llmeval-env/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py ADDED
@@ -0,0 +1,1099 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 GPT-J model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import tensorflow as tf
23
+
24
+ from ...activations_tf import get_tf_activation
25
+ from ...file_utils import (
26
+ add_code_sample_docstrings,
27
+ add_start_docstrings,
28
+ add_start_docstrings_to_model_forward,
29
+ )
30
+ from ...modeling_tf_outputs import (
31
+ TFBaseModelOutputWithPast,
32
+ TFCausalLMOutputWithPast,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutputWithPast,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFCausalLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFPreTrainedModel,
40
+ TFQuestionAnsweringLoss,
41
+ TFSequenceClassificationLoss,
42
+ TFSharedEmbeddings,
43
+ get_initializer,
44
+ keras,
45
+ keras_serializable,
46
+ unpack_inputs,
47
+ )
48
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
49
+ from ...utils import logging
50
+ from .configuration_gptj import GPTJConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
56
+ _CONFIG_FOR_DOC = "GPTJConfig"
57
+
58
+
59
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
60
+ inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
61
+ sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)
62
+ sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)
63
+ out = tf.concat((sin, cos), axis=1)
64
+ return out
65
+
66
+
67
+ def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
68
+ rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
69
+ new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]
70
+ rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)
71
+ return rotate_half_tensor
72
+
73
+
74
+ def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
75
+ sin_pos, cos_pos = sincos
76
+ sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)
77
+ cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)
78
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
79
+
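+ # The three helpers above implement rotary position embeddings (RoPE): `create_sinusoidal_positions`
+ # precomputes the sin/cos table, `rotate_every_two` pairs adjacent feature dimensions, and
+ # `apply_rotary_pos_emb` rotates each pair by a position-dependent angle.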
80
+
81
+ class TFGPTJAttention(keras.layers.Layer):
82
+ def __init__(self, config: GPTJConfig, **kwargs):
83
+ super().__init__(**kwargs)
84
+
85
+ self.embed_dim = config.hidden_size
86
+ self.num_attention_heads = config.num_attention_heads
87
+ self.head_dim = self.embed_dim // self.num_attention_heads
88
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
89
+ raise ValueError(
90
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
91
+ f" `num_attention_heads`: {self.num_attention_heads})."
92
+ )
93
+ self.scale_attn = self.head_dim**0.5
94
+ self.rotary_dim = config.rotary_dim
95
+
96
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
97
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
98
+
99
+ self.q_proj = keras.layers.Dense(
100
+ self.embed_dim,
101
+ use_bias=False,
102
+ kernel_initializer=get_initializer(config.initializer_range),
103
+ name="q_proj",
104
+ )
105
+ self.k_proj = keras.layers.Dense(
106
+ self.embed_dim,
107
+ use_bias=False,
108
+ kernel_initializer=get_initializer(config.initializer_range),
109
+ name="k_proj",
110
+ )
111
+ self.v_proj = keras.layers.Dense(
112
+ self.embed_dim,
113
+ use_bias=False,
114
+ kernel_initializer=get_initializer(config.initializer_range),
115
+ name="v_proj",
116
+ )
117
+ self.out_proj = keras.layers.Dense(
118
+ self.embed_dim,
119
+ use_bias=False,
120
+ kernel_initializer=get_initializer(config.initializer_range),
121
+ name="out_proj",
122
+ )
123
+
124
+ self.max_positions = config.max_position_embeddings
125
+ self.lower_triangle_mask = tf.reshape(
126
+ tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
127
+ (1, 1, self.max_positions, self.max_positions),
128
+ )
129
+ pos_embd_dim = self.rotary_dim or self.embed_dim
130
+ self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)
131
+
132
+ def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
133
+ return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)
134
+
135
+ @staticmethod
136
+ def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
137
+ return tf.cast(tf.constant(-1e9), dtype)
138
+
139
+ def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
140
+ """
141
+ Splits hidden dim into attn_head_size and num_attention_heads
142
+ """
143
+ new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
144
+ hidden_states = tf.reshape(hidden_states, new_shape)
145
+ if rotary:
146
+ return hidden_states
147
+ if len(shape_list(hidden_states)) == 4:
148
+ return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
149
+ if len(shape_list(hidden_states)) == 5:
150
+ return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)
151
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
152
+
153
+ def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
154
+ """
155
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
156
+ """
157
+ if len(shape_list(hidden_states)) == 4:
158
+ hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
159
+ elif len(shape_list(hidden_states)) == 5:
160
+ hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
161
+ else:
162
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
163
+ new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
164
+ return tf.reshape(hidden_states, new_shape)
165
+
166
+ def _attn(
167
+ self,
168
+ query: tf.Tensor,
169
+ key: tf.Tensor,
170
+ value: tf.Tensor,
171
+ attention_mask: tf.Tensor | None = None,
172
+ head_mask: tf.Tensor | None = None,
173
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
174
+ # compute causal mask from causal mask buffer
175
+ query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
176
+ causal_mask = self.get_causal_mask(key_length, query_length)
177
+
178
+ # Keep the attention weights computation in fp32 to avoid overflow issues
179
+ query = tf.cast(query, tf.float32)
180
+ key = tf.cast(key, tf.float32)
181
+
182
+ attn_weights = tf.matmul(query, key, transpose_b=True)
183
+ attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))
184
+
185
+ attn_weights = attn_weights / self.scale_attn
186
+
187
+ if attention_mask is not None:
188
+ # Apply the attention mask
189
+ attn_weights = attn_weights + attention_mask
190
+
191
+ attn_weights = stable_softmax(attn_weights, axis=-1)
192
+ attn_weights = tf.cast(attn_weights, value.dtype)
193
+ attn_weights = self.attn_dropout(attn_weights)
194
+
195
+ # Mask heads if we want to
196
+ if head_mask is not None:
197
+ attn_weights = attn_weights * head_mask
198
+
199
+ attn_output = tf.matmul(attn_weights, value)
200
+
201
+ return attn_output, attn_weights
202
+
203
+ def call(
204
+ self,
205
+ hidden_states: tf.Tensor,
206
+ layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
207
+ attention_mask: tf.Tensor | None = None,
208
+ position_ids: tf.Tensor | None = None,
209
+ head_mask: tf.Tensor | None = None,
210
+ use_cache: bool = False,
211
+ output_attentions: bool = False,
212
+ ):
213
+ query = self.q_proj(hidden_states)
214
+ key = self.k_proj(hidden_states)
215
+ value = self.v_proj(hidden_states)
216
+
217
+ query = self._split_heads(query, True)
218
+ key = self._split_heads(key, True)
219
+ value = self._split_heads(value, False)
220
+
221
+ sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
222
+ sincos = tf.split(sincos, 2, axis=-1)
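+ # gather the precomputed sin/cos rows for the requested positions and split them into the (sin, cos) pair consumed by apply_rotary_pos_emb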
223
+ if self.rotary_dim is not None:
224
+ k_rot = key[:, :, :, : self.rotary_dim]
225
+ k_pass = key[:, :, :, self.rotary_dim :]
226
+
227
+ q_rot = query[:, :, :, : self.rotary_dim]
228
+ q_pass = query[:, :, :, self.rotary_dim :]
229
+
230
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
231
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
232
+
233
+ key = tf.concat((k_rot, k_pass), axis=-1)
234
+ query = tf.concat((q_rot, q_pass), axis=-1)
235
+ else:
236
+ key = apply_rotary_pos_emb(key, sincos)
237
+ query = apply_rotary_pos_emb(query, sincos)
238
+
239
+ key = tf.transpose(key, (0, 2, 1, 3))
240
+ query = tf.transpose(query, (0, 2, 1, 3))
241
+
242
+ if layer_past is not None:
243
+ past_key = layer_past[0]
244
+ past_value = layer_past[1]
245
+ key = tf.concat((past_key, key), axis=-2)
246
+ value = tf.concat((past_value, value), axis=-2)
247
+
248
+ if use_cache is True:
249
+ present = (key, value)
250
+ else:
251
+ present = None
252
+
253
+ # compute self-attention: V x Softmax(QK^T)
254
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
255
+
256
+ attn_output = self._merge_heads(attn_output)
257
+ attn_output = self.out_proj(attn_output)
258
+ attn_output = self.resid_dropout(attn_output)
259
+
260
+ outputs = (attn_output, present)
261
+ if output_attentions:
262
+ outputs += (attn_weights,)
263
+
264
+ return outputs # a, present, (attentions)
265
+
266
+ def build(self, input_shape=None):
267
+ if self.built:
268
+ return
269
+ self.built = True
270
+ if getattr(self, "q_proj", None) is not None:
271
+ with tf.name_scope(self.q_proj.name):
272
+ self.q_proj.build([None, None, self.embed_dim])
273
+ if getattr(self, "k_proj", None) is not None:
274
+ with tf.name_scope(self.k_proj.name):
275
+ self.k_proj.build([None, None, self.embed_dim])
276
+ if getattr(self, "v_proj", None) is not None:
277
+ with tf.name_scope(self.v_proj.name):
278
+ self.v_proj.build([None, None, self.embed_dim])
279
+ if getattr(self, "out_proj", None) is not None:
280
+ with tf.name_scope(self.out_proj.name):
281
+ self.out_proj.build([None, None, self.embed_dim])
282
+
283
+
284
+ class TFGPTJMLP(keras.layers.Layer):
285
+ def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
286
+ super().__init__(**kwargs)
287
+ embed_dim = config.n_embd
288
+
289
+ self.fc_in = keras.layers.Dense(
290
+ intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
291
+ )
292
+ self.fc_out = keras.layers.Dense(
293
+ embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
294
+ )
295
+
296
+ self.act = get_tf_activation(config.activation_function)
297
+ self.dropout = keras.layers.Dropout(config.embd_pdrop)
298
+ self.embed_dim = config.n_embd
299
+ self.intermediate_size = intermediate_size
300
+
301
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
302
+ hidden_states = self.fc_in(hidden_states)
303
+ hidden_states = self.act(hidden_states)
304
+ hidden_states = self.fc_out(hidden_states)
305
+ hidden_states = self.dropout(hidden_states)
306
+ return hidden_states
307
+
308
+ def build(self, input_shape=None):
309
+ if self.built:
310
+ return
311
+ self.built = True
312
+ if getattr(self, "fc_in", None) is not None:
313
+ with tf.name_scope(self.fc_in.name):
314
+ self.fc_in.build([None, None, self.embed_dim])
315
+ if getattr(self, "fc_out", None) is not None:
316
+ with tf.name_scope(self.fc_out.name):
317
+ self.fc_out.build([None, None, self.intermediate_size])
318
+
319
+
320
+ class TFGPTJBlock(keras.layers.Layer):
321
+ def __init__(self, config: GPTJConfig, **kwargs):
322
+ super().__init__(**kwargs)
323
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
324
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
325
+ self.attn = TFGPTJAttention(config, name="attn")
326
+ self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
327
+ self.config = config
328
+
329
+ def call(
330
+ self,
331
+ hidden_states: tf.Tensor,
332
+ layer_past: tf.Tensor | None = None,
333
+ attention_mask: tf.Tensor | None = None,
334
+ position_ids: tf.Tensor | None = None,
335
+ head_mask: tf.Tensor | None = None,
336
+ use_cache: bool = False,
337
+ output_attentions: bool = False,
338
+ ):
339
+ residual = hidden_states
340
+ hidden_states = self.ln_1(hidden_states)
341
+ attn_outputs = self.attn(
342
+ hidden_states=hidden_states,
343
+ layer_past=layer_past,
344
+ attention_mask=attention_mask,
345
+ position_ids=position_ids,
346
+ head_mask=head_mask,
347
+ use_cache=use_cache,
348
+ output_attentions=output_attentions,
349
+ ) # attn_outputs: attn_output, present, (attentions)
350
+ attn_output = attn_outputs[0]
351
+ outputs = attn_outputs[1:]
352
+
353
+ feed_forward_hidden_states = self.mlp(hidden_states)
354
+ hidden_states = attn_output + feed_forward_hidden_states + residual
355
+
356
+ if use_cache:
357
+ outputs = (hidden_states,) + outputs
358
+ else:
359
+ outputs = (hidden_states,) + outputs[1:]
360
+ return outputs # hidden_states, present, (attentions)
361
+
362
+ def build(self, input_shape=None):
363
+ if self.built:
364
+ return
365
+ self.built = True
366
+ if getattr(self, "ln_1", None) is not None:
367
+ with tf.name_scope(self.ln_1.name):
368
+ self.ln_1.build([None, None, self.config.n_embd])
369
+ if getattr(self, "attn", None) is not None:
370
+ with tf.name_scope(self.attn.name):
371
+ self.attn.build(None)
372
+ if getattr(self, "mlp", None) is not None:
373
+ with tf.name_scope(self.mlp.name):
374
+ self.mlp.build(None)
375
+
376
+
377
+ @keras_serializable
378
+ class TFGPTJMainLayer(keras.layers.Layer):
379
+ config_class = GPTJConfig
380
+
381
+ def __init__(self, config: GPTJConfig, *inputs, **kwargs):
382
+ super().__init__(*inputs, **kwargs)
383
+
384
+ self.config = config
385
+ self.output_attentions = config.output_attentions
386
+ self.output_hidden_states = config.output_hidden_states
387
+ self.use_cache = config.use_cache
388
+ self.return_dict = config.use_return_dict
389
+
390
+ self.num_hidden_layers = config.n_layer
391
+ self.n_embd = config.n_embd
392
+ self.n_positions = config.n_positions
393
+ self.initializer_range = config.initializer_range
394
+
395
+ self.wte = TFSharedEmbeddings(
396
+ config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
397
+ )
398
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
399
+ self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
400
+ self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
401
+ self.embed_dim = config.n_embd
402
+
403
+ def get_input_embeddings(self):
404
+ return self.wte
405
+
406
+ def set_input_embeddings(self, value: tf.Tensor):
407
+ self.wte.weight = value
408
+ self.wte.vocab_size = shape_list(value)[0]
409
+
410
+ def _prune_heads(self, heads_to_prune):
411
+ """
412
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
413
+ """
414
+ raise NotImplementedError
415
+
416
+ @unpack_inputs
417
+ def call(
418
+ self,
419
+ input_ids=None,
420
+ past_key_values=None,
421
+ attention_mask=None,
422
+ token_type_ids=None,
423
+ position_ids=None,
424
+ head_mask=None,
425
+ inputs_embeds=None,
426
+ use_cache=None,
427
+ output_attentions=None,
428
+ output_hidden_states=None,
429
+ return_dict=None,
430
+ training=False,
431
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
432
+ if input_ids is not None and inputs_embeds is not None:
433
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
434
+ elif input_ids is not None:
435
+ input_shape = shape_list(input_ids)
436
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
437
+ elif inputs_embeds is not None:
438
+ input_shape = shape_list(inputs_embeds)[:-1]
439
+ else:
440
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
441
+
442
+ if past_key_values is None:
443
+ past_length = 0
444
+ past_key_values = [None] * len(self.h)
445
+ else:
446
+ past_length = shape_list(past_key_values[0][0])[-2]
447
+
448
+ if position_ids is None:
449
+ position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
450
+
451
+ if attention_mask is not None:
452
+ # We create a 3D attention mask from a 2D tensor mask.
453
+ # Sizes are [batch_size, 1, 1, to_seq_length]
454
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
455
+ # this attention mask is simpler than the triangular masking of causal attention
456
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
457
+ attention_mask_shape = shape_list(attention_mask)
458
+ attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
459
+
460
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
461
+ # masked positions, this operation will create a tensor which is 0.0 for
462
+ # positions we want to attend and -10000.0 for masked positions.
463
+ # Since we are adding it to the raw scores before the softmax, this is
464
+ # effectively the same as removing these entirely.
465
+ one_cst = tf.constant(1.0)
466
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
467
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
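The comment block above describes turning the 1.0/0.0 padding mask into an additive bias that is added to the raw attention scores. A tiny standalone illustration of that arithmetic (using the same `-10000.0` constant as the code):

```python
import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 0.0]])        # 1 = attend, 0 = masked
additive_mask = (1.0 - attention_mask) * -10000.0
print(additive_mask.numpy())  # [[-0. -0. -10000.]] -> ~0 where we attend, -10000 where masked
```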
468
+
469
+ # Prepare head mask if needed
470
+ # 1.0 in head_mask indicate we keep the head
471
+ # attention_probs has shape bsz x n_heads x N x N
472
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
473
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
474
+ if head_mask is not None:
475
+ raise NotImplementedError
476
+ else:
477
+ head_mask = [None] * self.num_hidden_layers
478
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
479
+
480
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
481
+
482
+ if inputs_embeds is None:
483
+ check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
484
+ inputs_embeds = self.wte(input_ids, mode="embedding")
485
+
486
+ if token_type_ids is not None:
487
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
488
+ token_type_embeds = self.wte(token_type_ids, mode="embedding")
489
+ else:
490
+ token_type_embeds = tf.constant(0.0)
491
+
492
+ token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
493
+ hidden_states = inputs_embeds + token_type_embeds
494
+ hidden_states = self.drop(hidden_states, training=training)
495
+
496
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
497
+
498
+ presents = () if use_cache else None
499
+ all_attentions = () if output_attentions else None
500
+ all_hidden_states = () if output_hidden_states else None
501
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
502
+ if output_hidden_states:
503
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
504
+
505
+ outputs = block(
506
+ hidden_states=hidden_states,
507
+ layer_past=layer_past,
508
+ attention_mask=attention_mask,
509
+ position_ids=position_ids,
510
+ head_mask=head_mask[i],
511
+ use_cache=use_cache,
512
+ output_attentions=output_attentions,
513
+ training=training,
514
+ )
515
+
516
+ hidden_states = outputs[0]
517
+ if use_cache:
518
+ presents = presents + (outputs[1],)
519
+
520
+ if output_attentions:
521
+ all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
522
+
523
+ hidden_states = self.ln_f(hidden_states)
524
+
525
+ hidden_states = tf.reshape(hidden_states, output_shape)
526
+ # Add last hidden state
527
+ if output_hidden_states:
528
+ all_hidden_states = all_hidden_states + (hidden_states,)
529
+
530
+ if output_attentions:
531
+ # let the number of heads free (-1) so we can extract attention even after head pruning
532
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
533
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
534
+
535
+ if not return_dict:
536
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
537
+
538
+ return TFBaseModelOutputWithPast(
539
+ last_hidden_state=hidden_states,
540
+ past_key_values=presents,
541
+ hidden_states=all_hidden_states,
542
+ attentions=all_attentions,
543
+ )
544
+
545
+ def build(self, input_shape=None):
546
+ if self.built:
547
+ return
548
+ self.built = True
549
+ if getattr(self, "wte", None) is not None:
550
+ with tf.name_scope(self.wte.name):
551
+ self.wte.build(None)
552
+ if getattr(self, "ln_f", None) is not None:
553
+ with tf.name_scope(self.ln_f.name):
554
+ self.ln_f.build([None, None, self.embed_dim])
555
+ if getattr(self, "h", None) is not None:
556
+ for layer in self.h:
557
+ with tf.name_scope(layer.name):
558
+ layer.build(None)
559
+
560
+
561
+ class TFGPTJPreTrainedModel(TFPreTrainedModel):
562
+ """
563
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
564
+ models.
565
+ """
566
+
567
+ config_class = GPTJConfig
568
+ base_model_prefix = "transformer"
569
+ # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
570
+ _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
571
+
572
+
573
+ GPTJ_START_DOCSTRING = r"""
574
+
575
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
576
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
577
+ etc.)
578
+
579
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
580
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
581
+ behavior.
582
+
583
+ <Tip>
584
+
585
+ TensorFlow models and layers in `transformers` accept two formats as input:
586
+
587
+ - having all inputs as keyword arguments (like PyTorch models), or
588
+ - having all inputs as a list, tuple or dict in the first positional argument.
589
+
590
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
591
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
592
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
593
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
594
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
595
+ positional argument:
596
+
597
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
598
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
599
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
600
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
601
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
602
+
603
+ Note that when creating models and layers with
604
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
605
+ about any of this, as you can just pass inputs like you would to any other Python function!
606
+
607
+ </Tip>
608
+
609
+ Parameters:
610
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
611
+ Initializing with a config file does not load the weights associated with the model, only the
612
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
613
+ """
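As a concrete companion to the tip above, the three accepted input formats look like this in practice. This is a minimal sketch with a deliberately tiny, randomly initialized `GPTJConfig` (the hyperparameter values below are illustrative, not real GPT-J sizes) so it runs without downloading a checkpoint:

```python
import tensorflow as tf
from transformers import GPTJConfig, TFGPTJModel

# Tiny random model purely for illustration.
config = GPTJConfig(vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=4, rotary_dim=8)
model = TFGPTJModel(config)

input_ids = tf.constant([[1, 2, 3, 4]])
attention_mask = tf.ones_like(input_ids)

# 1) all inputs as keyword arguments
out_kwargs = model(input_ids=input_ids, attention_mask=attention_mask)
# 2) a list in the first positional argument, in docstring order
out_list = model([input_ids, attention_mask])
# 3) a dictionary keyed by the input names
out_dict = model({"input_ids": input_ids, "attention_mask": attention_mask})
```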
614
+
615
+ GPTJ_INPUTS_DOCSTRING = r"""
616
+ Args:
617
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
618
+ `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
619
+ input past key value states). Indices of input sequence tokens in the vocabulary.
620
+
621
+ If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
622
+
623
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
624
+ [`PreTrainedTokenizer.encode`] for details.
625
+
626
+ [What are input IDs?](../glossary#input-ids)
627
+ past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
628
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
629
+ `past` output below). Can be used to speed up sequential decoding. The token ids which have their past
630
+ given to this model should not be passed as input ids as they have already been computed.
631
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
632
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ [What are attention masks?](../glossary#attention-mask)
638
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
639
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
640
+ 1]`:
641
+
642
+ - 0 corresponds to a *sentence A* token,
643
+ - 1 corresponds to a *sentence B* token.
644
+
645
+ [What are token type IDs?](../glossary#token-type-ids)
646
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
647
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
648
+ config.max_position_embeddings - 1]`.
649
+
650
+ [What are position IDs?](../glossary#position-ids)
651
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
652
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
653
+
654
+ - 1 indicates the head is **not masked**,
655
+ - 0 indicates the head is **masked**.
656
+
657
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
658
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
659
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
660
+ model's internal embedding lookup matrix.
661
+ output_attentions (`bool`, *optional*):
662
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
663
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
664
+ config will be used instead.
665
+ output_hidden_states (`bool`, *optional*):
666
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
667
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
668
+ used instead.
669
+ return_dict (`bool`, *optional*):
670
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
671
+ in eager mode, in graph mode the value will always be set to True.
672
+ training (`bool`, *optional*, defaults to `False`):
673
+ Whether or not to use the model in training mode (some modules like dropout modules have different
674
+ behaviors between training and evaluation).
675
+ """
676
+
677
+
678
+ @add_start_docstrings(
679
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
680
+ GPTJ_START_DOCSTRING,
681
+ )
682
+ class TFGPTJModel(TFGPTJPreTrainedModel):
683
+ def __init__(self, config, *inputs, **kwargs):
684
+ super().__init__(config, *inputs, **kwargs)
685
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
686
+
687
+ @unpack_inputs
688
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
689
+ @add_code_sample_docstrings(
690
+ checkpoint=_CHECKPOINT_FOR_DOC,
691
+ output_type=TFBaseModelOutputWithPast,
692
+ config_class=_CONFIG_FOR_DOC,
693
+ )
694
+ def call(
695
+ self,
696
+ input_ids: TFModelInputType | None = None,
697
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
698
+ attention_mask: np.ndarray | tf.Tensor | None = None,
699
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
700
+ position_ids: np.ndarray | tf.Tensor | None = None,
701
+ head_mask: np.ndarray | tf.Tensor | None = None,
702
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
703
+ use_cache: Optional[bool] = None,
704
+ output_attentions: Optional[bool] = None,
705
+ output_hidden_states: Optional[bool] = None,
706
+ return_dict: Optional[bool] = None,
707
+ training: Optional[bool] = False,
708
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
709
+ r"""
710
+ use_cache (`bool`, *optional*, defaults to `True`):
711
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
712
+ `past`). Set to `False` during training, `True` during generation
713
+ """
714
+
715
+ outputs = self.transformer(
716
+ input_ids=input_ids,
717
+ past_key_values=past_key_values,
718
+ attention_mask=attention_mask,
719
+ token_type_ids=token_type_ids,
720
+ position_ids=position_ids,
721
+ head_mask=head_mask,
722
+ inputs_embeds=inputs_embeds,
723
+ use_cache=use_cache,
724
+ output_attentions=output_attentions,
725
+ output_hidden_states=output_hidden_states,
726
+ return_dict=return_dict,
727
+ training=training,
728
+ )
729
+
730
+ return outputs
731
+
732
+ def build(self, input_shape=None):
733
+ if self.built:
734
+ return
735
+ self.built = True
736
+ if getattr(self, "transformer", None) is not None:
737
+ with tf.name_scope(self.transformer.name):
738
+ self.transformer.build(None)
739
+
740
+
741
+ @add_start_docstrings(
742
+ """
743
+ The GPT-J Model transformer with a language modeling head on top.
744
+ """,
745
+ GPTJ_START_DOCSTRING,
746
+ )
747
+ class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
748
+ def __init__(self, config, *inputs, **kwargs):
749
+ super().__init__(config, *inputs, **kwargs)
750
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
751
+ self.lm_head = keras.layers.Dense(
752
+ config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
753
+ )
754
+ self.config = config
755
+
756
+ def get_output_embeddings(self):
757
+ return self.lm_head
758
+
759
+ def set_output_embeddings(self, new_embeddings):
760
+ self.lm_head = new_embeddings
761
+
762
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
763
+ token_type_ids = kwargs.get("token_type_ids", None)
764
+ # only last token for inputs_ids if past is defined in kwargs
765
+ if past_key_values:
766
+ inputs = tf.expand_dims(inputs[:, -1], -1)
767
+ if token_type_ids is not None:
768
+ token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
769
+
770
+ position_ids = kwargs.get("position_ids", None)
771
+ attention_mask = kwargs.get("attention_mask", None)
772
+
773
+ if attention_mask is not None and position_ids is None:
774
+ position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
775
+ if past_key_values:
776
+ position_ids = tf.expand_dims(position_ids[:, -1], -1)
777
+
778
+ return {
779
+ "input_ids": inputs,
780
+ "attention_mask": attention_mask,
781
+ "position_ids": position_ids,
782
+ "past_key_values": past_key_values,
783
+ "use_cache": use_cache,
784
+ "token_type_ids": token_type_ids,
785
+ }
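To see the `position_ids` fallback above in isolation: when only an attention mask is available, an exclusive cumulative sum produces positions that only start advancing at the first real (non-padding) token. A small standalone sketch:

```python
import tensorflow as tf

# Left-padded row: the first two tokens are padding.
attention_mask = tf.constant([[0, 0, 1, 1, 1]])

# Exclusive cumsum gives 0-based positions that only advance on real tokens.
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
print(position_ids.numpy())  # [[0 0 0 1 2]]
```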
786
+
787
+ @unpack_inputs
788
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
789
+ @add_code_sample_docstrings(
790
+ checkpoint=_CHECKPOINT_FOR_DOC,
791
+ output_type=TFCausalLMOutputWithPast,
792
+ config_class=_CONFIG_FOR_DOC,
793
+ )
794
+ def call(
795
+ self,
796
+ input_ids: TFModelInputType | None = None,
797
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
798
+ attention_mask: np.ndarray | tf.Tensor | None = None,
799
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
800
+ position_ids: np.ndarray | tf.Tensor | None = None,
801
+ head_mask: np.ndarray | tf.Tensor | None = None,
802
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
803
+ labels: np.ndarray | tf.Tensor | None = None,
804
+ use_cache: Optional[bool] = None,
805
+ output_attentions: Optional[bool] = None,
806
+ output_hidden_states: Optional[bool] = None,
807
+ return_dict: Optional[bool] = None,
808
+ training: Optional[bool] = False,
809
+ ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
810
+ r"""
811
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
812
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
813
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
814
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
815
+ """
816
+
817
+ transformer_outputs = self.transformer(
818
+ input_ids=input_ids,
819
+ past_key_values=past_key_values,
820
+ attention_mask=attention_mask,
821
+ token_type_ids=token_type_ids,
822
+ position_ids=position_ids,
823
+ head_mask=head_mask,
824
+ inputs_embeds=inputs_embeds,
825
+ use_cache=use_cache,
826
+ output_attentions=output_attentions,
827
+ output_hidden_states=output_hidden_states,
828
+ return_dict=return_dict,
829
+ training=training,
830
+ )
831
+ hidden_states = transformer_outputs[0]
832
+ lm_logits = self.lm_head(hidden_states)
833
+
834
+ loss = None
835
+ if labels is not None:
836
+ # shift labels to the left and cut last logit token
837
+ shifted_logits = lm_logits[:, :-1]
838
+ labels = labels[:, 1:]
839
+ loss = self.hf_compute_loss(labels, shifted_logits)
840
+
841
+ if not return_dict:
842
+ output = (lm_logits,) + transformer_outputs[1:]
843
+ return ((loss,) + output) if loss is not None else output
844
+
845
+ return TFCausalLMOutputWithPast(
846
+ loss=loss,
847
+ logits=lm_logits,
848
+ past_key_values=transformer_outputs.past_key_values,
849
+ hidden_states=transformer_outputs.hidden_states,
850
+ attentions=transformer_outputs.attentions,
851
+ )
852
+
853
+ def build(self, input_shape=None):
854
+ if self.built:
855
+ return
856
+ self.built = True
857
+ if getattr(self, "transformer", None) is not None:
858
+ with tf.name_scope(self.transformer.name):
859
+ self.transformer.build(None)
860
+ if getattr(self, "lm_head", None) is not None:
861
+ with tf.name_scope(self.lm_head.name):
862
+ self.lm_head.build([None, None, self.config.n_embd])
863
+
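The loss computation in `TFGPTJForCausalLM.call` above drops the last logit and the first label so that each remaining logit is scored against the token that follows it. A standalone sketch of that alignment, using plain Keras cross-entropy instead of the model's internal `hf_compute_loss`:

```python
import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])     # (batch, seq_len)
lm_logits = tf.random.uniform((1, 4, 100))   # (batch, seq_len, vocab_size), dummy values

shifted_logits = lm_logits[:, :-1]           # predictions for positions 1..n-1
shifted_labels = labels[:, 1:]               # targets are the *next* tokens
loss = tf.keras.losses.sparse_categorical_crossentropy(shifted_labels, shifted_logits, from_logits=True)
print(loss.shape)  # (1, 3): one loss term per predicted next token
```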
864
+
865
+ @add_start_docstrings(
866
+ """
867
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
868
+
869
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
870
+ (e.g. GPT, GPT-2, GPT-Neo) do.
871
+
872
+ Since it does classification on the last token, it requires to know the position of the last token. If a
873
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
874
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
875
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
876
+ each row of the batch).
877
+ """,
878
+ GPTJ_START_DOCSTRING,
879
+ )
880
+ class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
881
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
882
+
883
+ def __init__(self, config, *inputs, **kwargs):
884
+ super().__init__(config, *inputs, **kwargs)
885
+ self.num_labels = config.num_labels
886
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
887
+ self.score = keras.layers.Dense(
888
+ self.num_labels,
889
+ use_bias=False,
890
+ kernel_initializer=get_initializer(config.initializer_range),
891
+ name="score",
892
+ )
893
+ self.config = config
894
+
895
+ @unpack_inputs
896
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
897
+ @add_code_sample_docstrings(
898
+ checkpoint=_CHECKPOINT_FOR_DOC,
899
+ output_type=TFSequenceClassifierOutputWithPast,
900
+ config_class=_CONFIG_FOR_DOC,
901
+ )
902
+ def call(
903
+ self,
904
+ input_ids: TFModelInputType | None = None,
905
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
906
+ attention_mask: np.ndarray | tf.Tensor | None = None,
907
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
908
+ position_ids: np.ndarray | tf.Tensor | None = None,
909
+ head_mask: np.ndarray | tf.Tensor | None = None,
910
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
911
+ labels: np.ndarray | tf.Tensor | None = None,
912
+ use_cache: Optional[bool] = None,
913
+ output_attentions: Optional[bool] = None,
914
+ output_hidden_states: Optional[bool] = None,
915
+ return_dict: Optional[bool] = None,
916
+ training: Optional[bool] = False,
917
+ ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
918
+ r"""
919
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
920
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
921
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
922
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
923
+ """
924
+
925
+ transformer_outputs = self.transformer(
926
+ input_ids=input_ids,
927
+ past_key_values=past_key_values,
928
+ attention_mask=attention_mask,
929
+ token_type_ids=token_type_ids,
930
+ position_ids=position_ids,
931
+ head_mask=head_mask,
932
+ inputs_embeds=inputs_embeds,
933
+ use_cache=use_cache,
934
+ output_attentions=output_attentions,
935
+ output_hidden_states=output_hidden_states,
936
+ return_dict=return_dict,
937
+ training=training,
938
+ )
939
+ hidden_states = transformer_outputs[0]
940
+ logits = self.score(hidden_states)
941
+ logits_shape = shape_list(logits)
942
+ in_logits = None
943
+ if self.config.pad_token_id is None:
944
+ sequence_lengths = -1
945
+ else:
946
+ if input_ids is not None:
947
+ sequence_lengths = (
948
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
949
+ - 1
950
+ )
951
+ sequence_lengths = tf.where(
952
+ sequence_lengths >= 0,
953
+ sequence_lengths,
954
+ tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,
955
+ )
956
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
957
+ else:
958
+ sequence_lengths = -1
959
+ logger.warning(
960
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
961
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
962
+ )
963
+ loss = None
964
+
965
+ if labels is not None:
966
+ if self.config.pad_token_id is None and logits_shape[0] != 1:
967
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
968
+
969
+ if not tf.is_tensor(sequence_lengths):
970
+ in_logits = logits[0 : logits_shape[0], sequence_lengths]
971
+
972
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
973
+ pooled_logits = in_logits if in_logits is not None else logits
974
+
975
+ if not return_dict:
976
+ output = (pooled_logits,) + transformer_outputs[1:]
977
+ return ((loss,) + output) if loss is not None else output
978
+
979
+ return TFSequenceClassifierOutputWithPast(
980
+ loss=loss,
981
+ logits=pooled_logits,
982
+ past_key_values=transformer_outputs.past_key_values,
983
+ hidden_states=transformer_outputs.hidden_states,
984
+ attentions=transformer_outputs.attentions,
985
+ )
986
+
987
+ def build(self, input_shape=None):
988
+ if self.built:
989
+ return
990
+ self.built = True
991
+ if getattr(self, "transformer", None) is not None:
992
+ with tf.name_scope(self.transformer.name):
993
+ self.transformer.build(None)
994
+ if getattr(self, "score", None) is not None:
995
+ with tf.name_scope(self.score.name):
996
+ self.score.build([None, None, self.config.n_embd])
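The head above scores only the last non-padding token of each row. The index arithmetic (argmax over a padding mask, minus one) can be checked in isolation with this sketch, assuming right padding and a hypothetical `pad_token_id` of 0:

```python
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 6, 7, 0, 0],
                         [8, 9, 0, 0, 0]])

# Index of the first padding token, minus one = index of the last real token.
is_pad = tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype)
sequence_lengths = tf.argmax(is_pad, axis=-1) - 1
print(sequence_lengths.numpy())  # [2 1]
```

When a row contains no padding at all, the argmax lands on index 0 and the subtraction yields -1, which the code above then maps back to the last position via `tf.where`.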
997
+
998
+
999
+ @add_start_docstrings(
1000
+ """
1001
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
1002
+ SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1003
+ """,
1004
+ GPTJ_START_DOCSTRING,
1005
+ )
1006
+ class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
1007
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
1008
+
1009
+ def __init__(self, config, *inputs, **kwargs):
1010
+ super().__init__(config, *inputs, **kwargs)
1011
+ self.num_labels = config.num_labels
1012
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
1013
+ self.qa_outputs = keras.layers.Dense(
1014
+ self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1015
+ )
1016
+ self.config = config
1017
+
1018
+ @unpack_inputs
1019
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1020
+ @add_code_sample_docstrings(
1021
+ checkpoint=_CHECKPOINT_FOR_DOC,
1022
+ output_type=TFQuestionAnsweringModelOutput,
1023
+ config_class=_CONFIG_FOR_DOC,
1024
+ )
1025
+ def call(
1026
+ self,
1027
+ input_ids: TFModelInputType | None = None,
1028
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1029
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1030
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1031
+ position_ids: np.ndarray | tf.Tensor | None = None,
1032
+ head_mask: np.ndarray | tf.Tensor | None = None,
1033
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1034
+ start_positions: np.ndarray | tf.Tensor | None = None,
1035
+ end_positions: np.ndarray | tf.Tensor | None = None,
1036
+ output_attentions: Optional[bool] = None,
1037
+ output_hidden_states: Optional[bool] = None,
1038
+ return_dict: Optional[bool] = None,
1039
+ training: Optional[bool] = False,
1040
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1041
+ r"""
1042
+ start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
1043
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1044
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1045
+ are not taken into account for computing the loss.
1046
+ end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
1047
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1048
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1049
+ are not taken into account for computing the loss.
1050
+ """
1051
+
1052
+ transformer_outputs = self.transformer(
1053
+ input_ids=input_ids,
1054
+ past_key_values=past_key_values,
1055
+ attention_mask=attention_mask,
1056
+ token_type_ids=token_type_ids,
1057
+ position_ids=position_ids,
1058
+ head_mask=head_mask,
1059
+ inputs_embeds=inputs_embeds,
1060
+ output_attentions=output_attentions,
1061
+ output_hidden_states=output_hidden_states,
1062
+ return_dict=return_dict,
1063
+ training=training,
1064
+ )
1065
+ sequence_output = transformer_outputs[0]
1066
+
1067
+ logits = self.qa_outputs(sequence_output)
1068
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1069
+ start_logits = tf.squeeze(start_logits, axis=-1)
1070
+ end_logits = tf.squeeze(end_logits, axis=-1)
1071
+
1072
+ loss = None
1073
+ if start_positions is not None and end_positions is not None:
1074
+ labels = {"start_position": start_positions}
1075
+ labels["end_position"] = end_positions
1076
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1077
+
1078
+ if not return_dict:
1079
+ output = (start_logits, end_logits) + transformer_outputs[2:]
1080
+ return ((loss,) + output) if loss is not None else output
1081
+
1082
+ return TFQuestionAnsweringModelOutput(
1083
+ loss=loss,
1084
+ start_logits=start_logits,
1085
+ end_logits=end_logits,
1086
+ hidden_states=transformer_outputs.hidden_states,
1087
+ attentions=transformer_outputs.attentions,
1088
+ )
1089
+
1090
+ def build(self, input_shape=None):
1091
+ if self.built:
1092
+ return
1093
+ self.built = True
1094
+ if getattr(self, "transformer", None) is not None:
1095
+ with tf.name_scope(self.transformer.name):
1096
+ self.transformer.build(None)
1097
+ if getattr(self, "qa_outputs", None) is not None:
1098
+ with tf.name_scope(self.qa_outputs.name):
1099
+ self.qa_outputs.build([None, None, self.config.hidden_size])
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__init__.py ADDED
@@ -0,0 +1,57 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_llava": ["LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaConfig"],
+     "processing_llava": ["LlavaProcessor"],
+ }
+
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_llava"] = [
+         "LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "LlavaForConditionalGeneration",
+         "LlavaPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_llava import LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaConfig
+     from .processing_llava import LlavaProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_llava import (
+             LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
+             LlavaForConditionalGeneration,
+             LlavaPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
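Because of the `_LazyModule` indirection above, importing the subpackage stays cheap: a submodule is only loaded when one of its names is first accessed, and the torch-only modeling classes are registered only when torch is available. A small usage sketch (assuming a transformers build that ships Llava and, for the last line, an installed torch):

```python
# Accessing a name triggers the real import of the submodule that defines it
# (see _import_structure above); the package import itself loads almost nothing.
from transformers.models.llava import LlavaConfig, LlavaProcessor

config = LlavaConfig()
print(config.model_type)  # "llava"

# Torch-only symbols exist only when torch is installed.
from transformers.models.llava import LlavaForConditionalGeneration
```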
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (942 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/configuration_llava.cpython-310.pyc ADDED
Binary file (4.99 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/convert_llava_weights_to_hf.cpython-310.pyc ADDED
Binary file (4.47 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/modeling_llava.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/__pycache__/processing_llava.cpython-310.pyc ADDED
Binary file (6.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py ADDED
@@ -0,0 +1,156 @@
+ # coding=utf-8
+ # Copyright 2023 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Llava model configuration"""
+
+ import warnings
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+ from ..auto import CONFIG_MAPPING
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class LlavaConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`LlavaForConditionalGeneration`]. It is used to instantiate an
+     Llava model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the Llava-9B.
+
+     e.g. [llava-hf/llava-9b](https://huggingface.co/llava-hf/llava-9b)
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vision_config (`Union[AutoConfig, dict]`,  *optional*, defaults to `CLIPVisionConfig`):
+             The config object or dictionary of the vision backbone.
+         text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
+             The config object or dictionary of the text backbone.
+         ignore_index (`int`, *optional*, defaults to -100):
+             The ignore index for the loss function.
+         image_token_index (`int`, *optional*, defaults to 32000):
+             The image token index to encode the image prompt.
+         projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
+             The activation function used by the multimodal projector.
+         vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
+             The feature selection strategy used to select the vision feature from the vision backbone.
+             Can be one of `"default"` or `"full"`.
+         vision_feature_layer (`int`, *optional*, defaults to -2):
+             The index of the layer to select the vision feature.
+
+     Example:
+
+     ```python
+     >>> from transformers import LlavaForConditionalGeneration, LlavaConfig, CLIPVisionConfig, LlamaConfig
+
+     >>> # Initializing a CLIP-vision config
+     >>> vision_config = CLIPVisionConfig()
+
+     >>> # Initializing a Llama config
+     >>> text_config = LlamaConfig()
+
+     >>> # Initializing a Llava llava-1.5-7b style configuration
+     >>> configuration = LlavaConfig(vision_config, text_config)
+
+     >>> # Initializing a model from the llava-1.5-7b style configuration
+     >>> model = LlavaForConditionalGeneration(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "llava"
+     is_composition = False
+
+     def __init__(
+         self,
+         vision_config=None,
+         text_config=None,
+         ignore_index=-100,
+         image_token_index=32000,
+         projector_hidden_act="gelu",
+         vision_feature_select_strategy="default",
+         vision_feature_layer=-2,
+         **kwargs,
+     ):
+         self.ignore_index = ignore_index
+         self.image_token_index = image_token_index
+         self.projector_hidden_act = projector_hidden_act
+
+         if vision_feature_select_strategy not in ["default", "full"]:
+             raise ValueError(
+                 "vision_feature_select_strategy should be one of 'default', 'full'."
+                 f"Got: {vision_feature_select_strategy}"
+             )
+
+         if "vocab_size" in kwargs:
+             warnings.warn(
+                 "The `vocab_size` argument is deprecated and will be removed in v4.42, since it can be inferred from the `text_config`. Passing this argument has no effect",
+                 FutureWarning,
+             )
+
+         self.vision_feature_select_strategy = vision_feature_select_strategy
+         self.vision_feature_layer = vision_feature_layer
+
+         if isinstance(vision_config, dict):
+             vision_config["model_type"] = (
+                 vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
+             )
+             vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
+         elif vision_config is None:
+             vision_config = CONFIG_MAPPING["clip_vision_model"](
+                 intermediate_size=4096,
+                 hidden_size=1024,
+                 patch_size=14,
+                 image_size=336,
+                 num_hidden_layers=24,
+                 num_attention_heads=16,
+                 vocab_size=32000,
+                 projection_dim=768,
+             )
+
+         self.vision_config = vision_config
+
+         if isinstance(text_config, dict):
+             text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
+             text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
+         elif text_config is None:
+             text_config = CONFIG_MAPPING["llama"]()
+
+         self.text_config = text_config
+         self._vocab_size = self.text_config.vocab_size
+
+         super().__init__(**kwargs)
+
+     @property
+     def vocab_size(self):
+         warnings.warn(
+             "The `vocab_size` attribute is deprecated and will be removed in v4.42, Please use `text_config.vocab_size` instead.",
+             FutureWarning,
+         )
+         return self._vocab_size
+
+     @vocab_size.setter
+     def vocab_size(self, value):
+         self._vocab_size = value
+
+     def to_dict(self):
+         output = super().to_dict()
+         output.pop("_vocab_size", None)
+         return output
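Besides the config objects shown in the docstring example, the constructor above also accepts plain dictionaries for the sub-configs; a missing `"model_type"` key falls back to `"clip_vision_model"` and `"llama"` respectively. A short sketch (the hidden sizes below are arbitrary illustration values):

```python
from transformers import LlavaConfig

config = LlavaConfig(
    vision_config={"hidden_size": 1024, "patch_size": 14, "image_size": 336},
    text_config={"hidden_size": 2048, "num_hidden_layers": 4},
    image_token_index=32000,
)
print(type(config.vision_config).__name__)  # CLIPVisionConfig
print(type(config.text_config).__name__)    # LlamaConfig
```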
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/convert_llava_weights_to_hf.py ADDED
@@ -0,0 +1,148 @@
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import argparse
+
+ import torch
+ from huggingface_hub import hf_hub_download
+
+ from transformers import (
+     AddedToken,
+     AutoConfig,
+     AutoTokenizer,
+     CLIPImageProcessor,
+     LlavaConfig,
+     LlavaForConditionalGeneration,
+     LlavaProcessor,
+ )
+
+
+ EPILOG_TXT = """Example:
+ python transformers/src/transformers/models/llava/convert_llava_weights_to_hf.py --text_model_id lmsys/vicuna-7b-v1.5 --vision_model_id openai/clip-vit-large-patch14-336 --output_hub_path org/llava-v1.5-7b-conv --old_state_dict_id liuhaotian/llava-v1.5-7b
+
+ Example for creating the old state dict file with Python:
+
+ import torch
+ from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
+
+ # load model
+ kwargs = {"device_map": "auto", "torch_dtype": torch.float16}
+ model = LlavaLlamaForCausalLM.from_pretrained("liuhaotian/llava-v1.5-7b", low_cpu_mem_usage=True, **kwargs)
+
+ # load vision tower
+ model.get_vision_tower().load_model()
+
+ # Save state dict
+ torch.save(model.state_dict(), "tmp/hf_models/llava-v1.5-7b/model_state_dict.bin")
+ """
+
+ KEYS_TO_MODIFY_MAPPING = {
+     "model.vision_tower.": "",
+     "model.mm_projector": "multi_modal_projector",
+     "model": "model.model",
+     "vision_model.model": "vision_model",
+     "lm_head": "language_model.lm_head",
+     "model.model": "language_model.model",
+     "multi_modal_projector.0": "multi_modal_projector.linear_1",
+     "multi_modal_projector.2": "multi_modal_projector.linear_2",
+ }
+
+
+ def convert_state_dict_to_hf(state_dict):
+     new_state_dict = {}
+     for key, value in state_dict.items():
+         if key.endswith(".inv_freq"):
+             continue
+         for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
+             if key_to_modify in key:
+                 key = key.replace(key_to_modify, new_key)
+
+         new_state_dict[key] = value
+     return new_state_dict
+
+
+ def convert_llava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
+     torch.set_default_dtype(torch.float16)
+     text_config = AutoConfig.from_pretrained(text_model_id)
+
+     tokenizer = AutoTokenizer.from_pretrained(text_model_id)
+     tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
+     tokenizer.add_special_tokens({"pad_token": "<pad>"})
+
+     image_processor = CLIPImageProcessor.from_pretrained(vision_model_id)
+
+     processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
+
+     config = LlavaConfig(text_config=text_config)
+     config.pad_token_id = 32001
+
+     with torch.device("meta"):
+         model = LlavaForConditionalGeneration(config)
+
+     # Pad to 64 for performance reasons
+     pad_shape = 64
+
+     state_dict_path = hf_hub_download(old_state_dict_id, "model_state_dict.bin")
+
+     state_dict = torch.load(state_dict_path, map_location="cpu")
+     state_dict = convert_state_dict_to_hf(state_dict)
+     model.load_state_dict(state_dict, strict=True, assign=True)
+
+     pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
+     mu = torch.mean(pre_expansion_embeddings, dim=0).float()
+     n = pre_expansion_embeddings.size()[0]
+     sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
+     dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
+
+     # We add an image token so we resize the model
+     model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape)
+     model.language_model.model.embed_tokens.weight.data[32000:] = torch.stack(
+         tuple((dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[32000:].shape[0]))),
+         dim=0,
+     )
+     model.language_model.lm_head.weight.data[32000:] = torch.stack(
+         tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[32000:].shape[0]))),
+         dim=0,
+     )
+
+     model.push_to_hub(output_hub_path)
+     processor.push_to_hub(output_hub_path)
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         epilog=EPILOG_TXT,
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+     )
+     parser.add_argument(
+         "--text_model_id",
+         help="Hub location of the text model",
+     )
+     parser.add_argument(
+         "--vision_model_id",
+         help="Hub location of the vision model",
+     )
+     parser.add_argument(
+         "--output_hub_path",
+         help="Location on the hub of the converted model",
+     )
+     parser.add_argument(
+         "--old_state_dict_id",
+         help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`",
+     )
+     args = parser.parse_args()
+     convert_llava_llama_to_hf(args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id)
+
+
+ if __name__ == "__main__":
+     main()
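The renaming pass in `convert_state_dict_to_hf` applies the entries of `KEYS_TO_MODIFY_MAPPING` in insertion order, so a single key can be rewritten more than once. A toy illustration using a two-entry subset of that mapping (illustrative only, not the full table):

```python
mapping = {
    "model.mm_projector": "multi_modal_projector",
    "multi_modal_projector.0": "multi_modal_projector.linear_1",
}

key = "model.mm_projector.0.weight"
for old, new in mapping.items():  # dicts preserve insertion order (Python 3.7+)
    if old in key:
        key = key.replace(old, new)
print(key)  # multi_modal_projector.linear_1.weight
```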
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/modeling_llava.py ADDED
@@ -0,0 +1,572 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Llava model."""
16
+
17
+ from dataclasses import dataclass
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ... import PreTrainedModel
25
+ from ...activations import ACT2FN
26
+ from ...cache_utils import Cache
27
+ from ...modeling_outputs import ModelOutput
28
+ from ...utils import (
29
+ add_start_docstrings,
30
+ add_start_docstrings_to_model_forward,
31
+ logging,
32
+ replace_return_docstrings,
33
+ )
34
+ from ..auto import AutoModel, AutoModelForCausalLM
35
+ from .configuration_llava import LlavaConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "LlavaConfig"
41
+
42
+
43
+ from ..deprecated._archive_maps import LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
44
+
45
+
46
+ @dataclass
47
+ # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Llava
48
+ class LlavaCausalLMOutputWithPast(ModelOutput):
49
+ """
50
+ Base class for Llava causal language model (or autoregressive) outputs.
51
+
52
+ Args:
53
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
54
+ Language modeling loss (for next-token prediction).
55
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
56
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
57
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
58
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
59
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
60
+
61
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
62
+ `past_key_values` input) to speed up sequential decoding.
63
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
64
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
65
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
66
+
67
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
68
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
69
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
70
+ sequence_length)`.
71
+
72
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
73
+ heads.
74
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
75
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
76
+ sequence_length, hidden_size)`.
77
+
78
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
79
+ """
80
+
81
+ loss: Optional[torch.FloatTensor] = None
82
+ logits: torch.FloatTensor = None
83
+ past_key_values: Optional[List[torch.FloatTensor]] = None
84
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
85
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
86
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
87
+
88
+
89
+ class LlavaMultiModalProjector(nn.Module):
90
+ def __init__(self, config: LlavaConfig):
91
+ super().__init__()
92
+
93
+ self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
94
+ self.act = ACT2FN[config.projector_hidden_act]
95
+ self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
96
+
97
+ def forward(self, image_features):
98
+ hidden_states = self.linear_1(image_features)
99
+ hidden_states = self.act(hidden_states)
100
+ hidden_states = self.linear_2(hidden_states)
101
+ return hidden_states
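The projector above is simply `Linear -> GELU -> Linear`, mapping vision-tower features into the text model's hidden size. A standalone shape check, assuming CLIP ViT-L/14 at 336px (1024-dim features, 576 patches) and a LLaMA-7B hidden size of 4096; in the real model these sizes come from `config.vision_config` and `config.text_config`:

```python
import torch
from torch import nn

linear_1 = nn.Linear(1024, 4096)  # vision hidden size -> text hidden size
act = nn.GELU()
linear_2 = nn.Linear(4096, 4096)  # text hidden size -> text hidden size

image_features = torch.randn(1, 576, 1024)  # (batch, num_patches, vision_hidden_size)
projected = linear_2(act(linear_1(image_features)))
print(projected.shape)  # torch.Size([1, 576, 4096])
```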
102
+
103
+
104
+ LLAVA_START_DOCSTRING = r"""
105
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
106
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
107
+ etc.)
108
+
109
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
110
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
111
+ and behavior.
112
+
113
+ Parameters:
114
+ config ([`LlavaConfig`] or [`LlavaVisionConfig`]):
115
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
116
+ load the weights associated with the model, only the configuration. Check out the
117
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
118
+ """
119
+
120
+
121
+ @add_start_docstrings(
122
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
123
+ LLAVA_START_DOCSTRING,
124
+ )
125
+ class LlavaPreTrainedModel(PreTrainedModel):
126
+ config_class = LlavaConfig
127
+ base_model_prefix = "model"
128
+ supports_gradient_checkpointing = True
129
+ _no_split_modules = ["LlavaVisionAttention"]
130
+ _skip_keys_device_placement = "past_key_values"
131
+ _supports_flash_attn_2 = True
132
+
133
+ def _init_weights(self, module):
134
+ # important: this ported version of Llava isn't meant for training from scratch - only
135
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
136
+ # https://github.com/haotian-liu/LLaVA/tree/main/llava should serve for that purpose
137
+ std = (
138
+ self.config.initializer_range
139
+ if hasattr(self.config, "initializer_range")
140
+ else self.config.text_config.initializer_range
141
+ )
142
+
143
+ if hasattr(module, "class_embedding"):
144
+ module.class_embedding.data.normal_(mean=0.0, std=std)
145
+
146
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
147
+ module.weight.data.normal_(mean=0.0, std=std)
148
+ if module.bias is not None:
149
+ module.bias.data.zero_()
150
+ elif isinstance(module, nn.Embedding):
151
+ module.weight.data.normal_(mean=0.0, std=std)
152
+ if module.padding_idx is not None:
153
+ module.weight.data[module.padding_idx].zero_()
154
+
155
+ @property
156
+ def _supports_sdpa(self):
157
+ """
158
+ Retrieve language_model's attribute to check whether the model supports
159
+ SDPA or not.
160
+ """
161
+ return self.language_model._supports_sdpa
162
+
163
+
164
+ LLAVA_INPUTS_DOCSTRING = r"""
165
+ Args:
166
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
167
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
168
+ it.
169
+
170
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
171
+ [`PreTrainedTokenizer.__call__`] for details.
172
+
173
+ [What are input IDs?](../glossary#input-ids)
174
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):
175
+ The tensors corresponding to the input images. Pixel values can be obtained using
176
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([]`LlavaProcessor`] uses
177
+ [`CLIPImageProcessor`] for processing images).
178
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
179
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
180
+
181
+ - 1 for tokens that are **not masked**,
182
+ - 0 for tokens that are **masked**.
183
+
184
+ [What are attention masks?](../glossary#attention-mask)
185
+
186
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
187
+ [`PreTrainedTokenizer.__call__`] for details.
188
+
189
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
190
+ `past_key_values`).
191
+
192
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
193
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
194
+ information on the default strategy.
195
+
196
+ - 1 indicates the head is **not masked**,
197
+ - 0 indicates the head is **masked**.
198
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
199
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
200
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
201
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
202
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
203
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
204
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
205
+
206
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
207
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
208
+
209
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
210
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
211
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
212
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
213
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
214
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
215
+ model's internal embedding lookup matrix.
216
+ vision_feature_layer (`int`, *optional*, defaults to -2):
217
+ The index of the layer to select the vision feature.
218
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
219
+ The feature selection strategy used to select the vision feature from the vision backbone.
220
+ Can be one of `"default"` or `"full"`.
221
+ use_cache (`bool`, *optional*):
222
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
223
+ `past_key_values`).
224
+ output_attentions (`bool`, *optional*):
225
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
226
+ tensors for more detail.
227
+ output_hidden_states (`bool`, *optional*):
228
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
229
+ more detail.
230
+ return_dict (`bool`, *optional*):
231
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
232
+ """
233
+
234
+
235
+ @add_start_docstrings(
236
+ """The LLAVA model which consists of a vision backbone and a language model.""",
237
+ LLAVA_START_DOCSTRING,
238
+ )
239
+ class LlavaForConditionalGeneration(LlavaPreTrainedModel):
240
+ def __init__(self, config: LlavaConfig):
241
+ super().__init__(config)
242
+ self.vision_tower = AutoModel.from_config(config.vision_config)
243
+
244
+ self.multi_modal_projector = LlavaMultiModalProjector(config)
245
+ self.vocab_size = config.text_config.vocab_size
246
+ self.language_model = AutoModelForCausalLM.from_config(
247
+ config.text_config, attn_implementation=config._attn_implementation
248
+ )
249
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
250
+ self.post_init()
251
+
252
+ def get_input_embeddings(self):
253
+ return self.language_model.get_input_embeddings()
254
+
255
+ def set_input_embeddings(self, value):
256
+ self.language_model.set_input_embeddings(value)
257
+
258
+ def get_output_embeddings(self):
259
+ return self.language_model.get_output_embeddings()
260
+
261
+ def set_output_embeddings(self, new_embeddings):
262
+ self.language_model.set_output_embeddings(new_embeddings)
263
+
264
+ def set_decoder(self, decoder):
265
+ self.language_model.set_decoder(decoder)
266
+
267
+ def get_decoder(self):
268
+ return self.language_model.get_decoder()
269
+
270
+ def tie_weights(self):
271
+ return self.language_model.tie_weights()
272
+
273
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
274
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
275
+ # update vocab size
276
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
277
+ self.vocab_size = model_embeds.num_embeddings
278
+ return model_embeds
279
+
280
+ def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
281
+ num_images, num_image_patches, embed_dim = image_features.shape
282
+ batch_size, sequence_length = input_ids.shape
283
+ left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))
284
+ # 1. Create a mask to know where special image tokens are
285
+ special_image_token_mask = input_ids == self.config.image_token_index
286
+ num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
287
+ # Compute the maximum embed dimension
288
+ max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
289
+ batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)
290
+
291
+ # 2. Compute the positions where text should be written
292
+ # Calculate new positions for text tokens in merged image-text sequence.
293
+ # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens.
294
+ # `torch.cumsum` computes how each image token shifts subsequent text token positions.
295
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
296
+ new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
297
+ nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
298
+ if left_padding:
299
+ new_token_positions += nb_image_pad[:, None] # offset for left padding
300
+ text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
301
+
302
+ # 3. Create the full embedding, already padded to the maximum position
303
+ final_embedding = torch.zeros(
304
+ batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
305
+ )
306
+ final_attention_mask = torch.zeros(
307
+ batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
308
+ )
309
+ if labels is not None:
310
+ final_labels = torch.full(
311
+ (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
312
+ )
313
+ # In case the Vision model or the Language model has been offloaded to CPU, we need to manually
314
+ # set the corresponding tensors into their correct target device.
315
+ target_device = inputs_embeds.device
316
+ batch_indices, non_image_indices, text_to_overwrite = (
317
+ batch_indices.to(target_device),
318
+ non_image_indices.to(target_device),
319
+ text_to_overwrite.to(target_device),
320
+ )
321
+ attention_mask = attention_mask.to(target_device)
322
+
323
+ # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"]
324
+ # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
325
+ final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
326
+ final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
327
+ if labels is not None:
328
+ final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
329
+
330
+ # 5. Fill the embeddings corresponding to the images. Anything that is still zeros needs filling
331
+ image_to_overwrite = torch.all(final_embedding == 0, dim=-1)
332
+ image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)
333
+
334
+ if image_to_overwrite.sum() != image_features.shape[:-1].numel():
335
+ raise ValueError(
336
+ f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
337
+ f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
338
+ )
339
+
340
+ final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
341
+ final_attention_mask |= image_to_overwrite
342
+ position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
343
+
344
+ # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
345
+ batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id)
346
+ indices_to_mask = new_token_positions[batch_indices, pad_indices]
347
+
348
+ final_embedding[batch_indices, indices_to_mask] = 0
349
+
350
+ if labels is None:
351
+ final_labels = None
352
+
353
+ return final_embedding, final_attention_mask, final_labels, position_ids
354
+
355
+ @add_start_docstrings_to_model_forward(LLAVA_INPUTS_DOCSTRING)
356
+ @replace_return_docstrings(output_type=LlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
357
+ def forward(
358
+ self,
359
+ input_ids: torch.LongTensor = None,
360
+ pixel_values: torch.FloatTensor = None,
361
+ attention_mask: Optional[torch.Tensor] = None,
362
+ position_ids: Optional[torch.LongTensor] = None,
363
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
364
+ inputs_embeds: Optional[torch.FloatTensor] = None,
365
+ vision_feature_layer: Optional[int] = None,
366
+ vision_feature_select_strategy: Optional[str] = None,
367
+ labels: Optional[torch.LongTensor] = None,
368
+ use_cache: Optional[bool] = None,
369
+ output_attentions: Optional[bool] = None,
370
+ output_hidden_states: Optional[bool] = None,
371
+ return_dict: Optional[bool] = None,
372
+ ) -> Union[Tuple, LlavaCausalLMOutputWithPast]:
373
+ r"""
374
+ Args:
375
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
376
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
377
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
378
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
379
+
380
+ Returns:
381
+
382
+ Example:
383
+
384
+ ```python
385
+ >>> from PIL import Image
386
+ >>> import requests
387
+ >>> from transformers import AutoProcessor, LlavaForConditionalGeneration
388
+
389
+ >>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
390
+ >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
391
+
392
+ >>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:"
393
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
394
+ >>> image = Image.open(requests.get(url, stream=True).raw)
395
+
396
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
397
+
398
+ >>> # Generate
399
+ >>> generate_ids = model.generate(**inputs, max_new_tokens=15)
400
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
401
+ "USER: \nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed"
402
+ ```"""
403
+
404
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
405
+ output_hidden_states = (
406
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
407
+ )
408
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
409
+ vision_feature_layer = (
410
+ vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
411
+ )
412
+ vision_feature_select_strategy = (
413
+ vision_feature_select_strategy
414
+ if vision_feature_select_strategy is not None
415
+ else self.config.vision_feature_select_strategy
416
+ )
417
+
418
+ if inputs_embeds is None:
419
+ # 1. Extract the input embeddings
420
+ inputs_embeds = self.get_input_embeddings()(input_ids)
421
+
422
+ # 2. Merge text and images
423
+ if pixel_values is not None and input_ids.shape[1] != 1:
424
+ image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
425
+ # this is not memory efficient at all: `output_hidden_states=True` saves all the hidden states.
426
+ selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
427
+
428
+ if vision_feature_select_strategy == "default":
429
+ selected_image_feature = selected_image_feature[:, 1:]
430
+ elif vision_feature_select_strategy == "full":
431
+ selected_image_feature = selected_image_feature
432
+ else:
433
+ raise ValueError(
434
+ f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}"
435
+ )
436
+
437
+ image_features = self.multi_modal_projector(selected_image_feature)
438
+ inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
439
+ image_features, inputs_embeds, input_ids, attention_mask, labels
440
+ )
441
+ if labels is None:
442
+ labels = torch.full_like(attention_mask, self.config.ignore_index).to(torch.long)
443
+
444
+ # In case input_ids.shape[1] == 1 & pixel_values is not None & past_key_values is not None, we are in the case of
445
+ # generation with cache
446
+ elif past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1:
447
+ # Retrieve the first layer to inspect the logits and mask out the hidden states
448
+ # that are set to 0
449
+ first_layer_past_key_value = past_key_values[0][0][:, :, :, 0]
450
+
451
+ # Sum all dimensions of head_dim (-2) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941
452
+ batch_index, non_attended_tokens = torch.where(first_layer_past_key_value.float().sum(-2) == 0)
453
+
454
+ # Get the target length
455
+ target_length = input_ids.shape[1]
456
+ past_length = first_layer_past_key_value.shape[-1]
457
+
458
+ extended_attention_mask = torch.ones(
459
+ (attention_mask.shape[0], past_length),
460
+ dtype=attention_mask.dtype,
461
+ device=attention_mask.device,
462
+ )
463
+
464
+ # Filter out only the tokens that can be un-attended, this can happen
465
+ # if one uses Llava + Fused modules where the cache on the
466
+ # first iteration is already big enough, or if one passes a custom cache
467
+ valid_indices = non_attended_tokens < extended_attention_mask.size(-1)
468
+ new_batch_index = batch_index[valid_indices]
469
+ new_non_attended_tokens = non_attended_tokens[valid_indices]
470
+
471
+ # Zero-out the places where we don't need to attend
472
+ extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0
473
+
474
+ attention_mask = torch.cat((extended_attention_mask, attention_mask[:, -target_length:]), dim=1)
475
+ position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
476
+
477
+ outputs = self.language_model(
478
+ attention_mask=attention_mask,
479
+ position_ids=position_ids,
480
+ past_key_values=past_key_values,
481
+ inputs_embeds=inputs_embeds,
482
+ use_cache=use_cache,
483
+ output_attentions=output_attentions,
484
+ output_hidden_states=output_hidden_states,
485
+ return_dict=return_dict,
486
+ )
487
+
488
+ logits = outputs[0]
489
+
490
+ loss = None
491
+ if labels is not None:
492
+ # Shift so that tokens < n predict n
493
+ if attention_mask is not None:
494
+ shift_attention_mask = attention_mask[..., 1:]
495
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
496
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
497
+ else:
498
+ shift_logits = logits[..., :-1, :].contiguous()
499
+ shift_labels = labels[..., 1:].contiguous()
500
+ # Flatten the tokens
501
+ loss_fct = nn.CrossEntropyLoss()
502
+ loss = loss_fct(
503
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
504
+ )
505
+
506
+ if not return_dict:
507
+ output = (logits,) + outputs[1:]
508
+ return (loss,) + output if loss is not None else output
509
+
510
+ return LlavaCausalLMOutputWithPast(
511
+ loss=loss,
512
+ logits=logits,
513
+ past_key_values=outputs.past_key_values,
514
+ hidden_states=outputs.hidden_states,
515
+ attentions=outputs.attentions,
516
+ )
517
+
518
+ def prepare_inputs_for_generation(
519
+ self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, **kwargs
520
+ ):
521
+ if past_key_values is not None:
522
+ if isinstance(past_key_values, Cache):
523
+ cache_length = past_key_values.get_seq_length()
524
+ past_length = past_key_values.seen_tokens
525
+ else:
526
+ cache_length = past_length = past_key_values[0][0].shape[2]
527
+
528
+ # Keep only the unprocessed tokens:
529
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
530
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
531
+ # input)
532
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
533
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
534
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
535
+ # input_ids based on the past_length.
536
+ elif past_length < input_ids.shape[1]:
537
+ input_ids = input_ids[:, past_length:]
538
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
539
+ elif self.config.image_token_index in input_ids:
540
+ input_ids = input_ids[:, input_ids.shape[1] - 1 :]
541
+ # If the cache has seen more tokens than it can hold, then the cache has a size limit. Let's discard the
542
+ # older attention values, as their corresponding values are not part of the input.
543
+ if cache_length < past_length and attention_mask is not None:
544
+ attention_mask = attention_mask[:, -(cache_length + input_ids.shape[1]) :]
545
+
546
+ position_ids = kwargs.get("position_ids", None)
547
+ if attention_mask is not None and position_ids is None:
548
+ # create position_ids on the fly for batch generation
549
+ position_ids = attention_mask.long().cumsum(-1) - 1
550
+ position_ids.masked_fill_(attention_mask == 0, 1)
551
+ if past_key_values:
552
+ position_ids = position_ids[:, -input_ids.shape[1] :]
553
+
554
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
555
+ if inputs_embeds is not None and past_key_values is None:
556
+ model_inputs = {"inputs_embeds": inputs_embeds}
557
+ else:
558
+ model_inputs = {"input_ids": input_ids}
559
+
560
+ model_inputs.update(
561
+ {
562
+ "position_ids": position_ids,
563
+ "past_key_values": past_key_values,
564
+ "use_cache": kwargs.get("use_cache"),
565
+ "attention_mask": attention_mask,
566
+ "pixel_values": pixel_values,
567
+ }
568
+ )
569
+ return model_inputs
570
+
571
+ def _reorder_cache(self, *args, **kwargs):
572
+ return self.language_model._reorder_cache(*args, **kwargs)
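The `_merge_input_ids_with_image_features` method above relies on a single `torch.cumsum` over the image-token mask to work out where every text token lands once each `<image>` placeholder is expanded into `num_image_patches` vision embeddings. A minimal standalone sketch of that position arithmetic, with a made-up image token id and a toy patch count (a real CLIP backbone yields far more patches, e.g. 576), is shown below.

```python
import torch

# Hypothetical values for illustration only; they are not taken from a real checkpoint.
image_token_index = 32000                      # id of the "<image>" placeholder
num_image_patches = 3                          # toy value; e.g. 576 for the LLaVA-1.5 vision tower
input_ids = torch.tensor([[1, 32000, 5, 6]])   # "BOS <image> tok tok"

special_image_token_mask = input_ids == image_token_index
# Each image token adds (num_image_patches - 1) extra slots for its patch embeddings.
new_token_positions = torch.cumsum(special_image_token_mask * (num_image_patches - 1) + 1, -1) - 1
print(new_token_positions)  # tensor([[0, 3, 4, 5]])

# Text tokens (everything except the placeholder) are written to slots 0, 4 and 5 of the
# merged sequence of length 6; the three patch embeddings fill the remaining slots 1-3.
```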
llmeval-env/lib/python3.10/site-packages/transformers/models/llava/processing_llava.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Llava.
17
+ """
18
+
19
+
20
+ from typing import List, Optional, Union
21
+
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...image_utils import ImageInput
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
26
+ from ...utils import TensorType
27
+
28
+
29
+ class LlavaProcessor(ProcessorMixin):
30
+ r"""
31
+ Constructs a Llava processor which wraps a Llava image processor and a Llava tokenizer into a single processor.
32
+
33
+ [`LlavaProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
34
+ [`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information.
35
+
36
+ Args:
37
+ image_processor ([`CLIPImageProcessor`], *optional*):
38
+ The image processor is a required input.
39
+ tokenizer ([`LlamaTokenizerFast`], *optional*):
40
+ The tokenizer is a required input.
41
+ """
42
+
43
+ attributes = ["image_processor", "tokenizer"]
44
+ image_processor_class = "CLIPImageProcessor"
45
+ tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
46
+
47
+ def __init__(self, image_processor=None, tokenizer=None):
48
+ super().__init__(image_processor, tokenizer)
49
+
50
+ def __call__(
51
+ self,
52
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
53
+ images: ImageInput = None,
54
+ padding: Union[bool, str, PaddingStrategy] = False,
55
+ truncation: Union[bool, str, TruncationStrategy] = None,
56
+ max_length=None,
57
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
58
+ ) -> BatchFeature:
59
+ """
60
+ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
61
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
62
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
63
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
64
+ of the above two methods for more information.
65
+
66
+ Args:
67
+ text (`str`, `List[str]`, `List[List[str]]`):
68
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
69
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
70
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
71
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
72
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
73
+ tensor. Both channels-first and channels-last formats are supported.
74
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
75
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
76
+ index) among:
77
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
78
+ sequence is provided).
79
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
80
+ acceptable input length for the model if that argument is not provided.
81
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
82
+ lengths).
83
+ max_length (`int`, *optional*):
84
+ Maximum length of the returned list and optionally padding length (see above).
85
+ truncation (`bool`, *optional*):
86
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
87
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
88
+ If set, will return tensors of a particular framework. Acceptable values are:
89
+
90
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
91
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
92
+ - `'np'`: Return NumPy `np.ndarray` objects.
93
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
94
+
95
+ Returns:
96
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
97
+
98
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
99
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
100
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
101
+ `None`).
102
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
103
+ """
104
+ if images is not None:
105
+ pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
106
+ else:
107
+ pixel_values = None
108
+ text_inputs = self.tokenizer(
109
+ text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
110
+ )
111
+
112
+ return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})
113
+
114
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
115
+ def batch_decode(self, *args, **kwargs):
116
+ """
117
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
118
+ refer to the docstring of this method for more information.
119
+ """
120
+ return self.tokenizer.batch_decode(*args, **kwargs)
121
+
122
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
123
+ def decode(self, *args, **kwargs):
124
+ """
125
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
126
+ the docstring of this method for more information.
127
+ """
128
+ return self.tokenizer.decode(*args, **kwargs)
129
+
130
+ @property
131
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
132
+ def model_input_names(self):
133
+ tokenizer_input_names = self.tokenizer.model_input_names
134
+ image_processor_input_names = self.image_processor.model_input_names
135
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
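For reference, a typical round trip through this processor pairs a prompt containing the `<image>` placeholder with a PIL image and returns `input_ids`, `attention_mask` and `pixel_values` in a single `BatchFeature`. A short usage sketch, reusing the `llava-hf/llava-1.5-7b-hf` checkpoint and example image from the model docstring above:

```python
import requests
from PIL import Image

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")

prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:"
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Tokenizes the text and preprocesses the image in one call.
inputs = processor(text=prompt, images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```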
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc ADDED
Binary file (9.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/convert_musicgen_transformers.cpython-310.pyc ADDED
Binary file (6.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc ADDED
Binary file (81 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py ADDED
@@ -0,0 +1,258 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MusicGen model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto.configuration_auto import AutoConfig
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class MusicgenDecoderConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`MusicgenDecoder`]. It is used to instantiate a
31
+ MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a
32
+ configuration with the defaults will yield a similar configuration to that of the MusicGen
33
+ [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 2048):
41
+ Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be
42
+ represented by the `inputs_ids` passed when calling [`MusicgenDecoder`].
43
+ hidden_size (`int`, *optional*, defaults to 1024):
44
+ Dimensionality of the layers and the pooler layer.
45
+ num_hidden_layers (`int`, *optional*, defaults to 24):
46
+ Number of decoder layers.
47
+ num_attention_heads (`int`, *optional*, defaults to 16):
48
+ Number of attention heads for each attention layer in the Transformer block.
49
+ ffn_dim (`int`, *optional*, defaults to 4096):
50
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
51
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
54
+ dropout (`float`, *optional*, defaults to 0.1):
55
+ The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
56
+ attention_dropout (`float`, *optional*, defaults to 0.0):
57
+ The dropout ratio for the attention probabilities.
58
+ activation_dropout (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for activations inside the fully connected layer.
60
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
61
+ The maximum sequence length that this model might ever be used with. Typically, set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ initializer_factor (`float`, *optional*, defaults to 0.02):
64
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
65
+ layerdrop (`float`, *optional*, defaults to 0.0):
66
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
67
+ for more details.
68
+ scale_embedding (`bool`, *optional*, defaults to `False`):
69
+ Scale embeddings by dividing by sqrt(hidden_size).
70
+ use_cache (`bool`, *optional*, defaults to `True`):
71
+ Whether the model should return the last key/values attentions (not used by all models)
72
+ num_codebooks (`int`, *optional*, defaults to 4):
73
+ The number of parallel codebooks forwarded to the model.
74
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
75
+ Whether input and output word embeddings should be tied.
76
+ audio_channels (`int`, *optional*, defaults to 1):
77
+ Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate
78
+ audio stream for the left/right output channels. Mono models generate a single audio stream output.
79
+ """
80
+
81
+ model_type = "musicgen_decoder"
82
+ keys_to_ignore_at_inference = ["past_key_values"]
83
+
84
+ def __init__(
85
+ self,
86
+ vocab_size=2048,
87
+ max_position_embeddings=2048,
88
+ num_hidden_layers=24,
89
+ ffn_dim=4096,
90
+ num_attention_heads=16,
91
+ layerdrop=0.0,
92
+ use_cache=True,
93
+ activation_function="gelu",
94
+ hidden_size=1024,
95
+ dropout=0.1,
96
+ attention_dropout=0.0,
97
+ activation_dropout=0.0,
98
+ initializer_factor=0.02,
99
+ scale_embedding=False,
100
+ num_codebooks=4,
101
+ audio_channels=1,
102
+ pad_token_id=2048,
103
+ bos_token_id=2048,
104
+ eos_token_id=None,
105
+ tie_word_embeddings=False,
106
+ **kwargs,
107
+ ):
108
+ self.vocab_size = vocab_size
109
+ self.max_position_embeddings = max_position_embeddings
110
+ self.hidden_size = hidden_size
111
+ self.ffn_dim = ffn_dim
112
+ self.num_hidden_layers = num_hidden_layers
113
+ self.num_attention_heads = num_attention_heads
114
+ self.dropout = dropout
115
+ self.attention_dropout = attention_dropout
116
+ self.activation_dropout = activation_dropout
117
+ self.activation_function = activation_function
118
+ self.initializer_factor = initializer_factor
119
+ self.layerdrop = layerdrop
120
+ self.use_cache = use_cache
121
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
122
+ self.num_codebooks = num_codebooks
123
+
124
+ if audio_channels not in [1, 2]:
125
+ raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
126
+ self.audio_channels = audio_channels
127
+
128
+ super().__init__(
129
+ pad_token_id=pad_token_id,
130
+ bos_token_id=bos_token_id,
131
+ eos_token_id=eos_token_id,
132
+ tie_word_embeddings=tie_word_embeddings,
133
+ **kwargs,
134
+ )
135
+
136
+
137
+ class MusicgenConfig(PretrainedConfig):
138
+ r"""
139
+ This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a
140
+ MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder
141
+ configs.
142
+
143
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
144
+ documentation from [`PretrainedConfig`] for more information.
145
+
146
+ Args:
147
+ kwargs (*optional*):
148
+ Dictionary of keyword arguments. Notably:
149
+
150
+ - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
151
+ defines the text encoder config.
152
+ - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
153
+ defines the audio encoder config.
154
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
155
+ the decoder config.
156
+
157
+ Example:
158
+
159
+ ```python
160
+ >>> from transformers import (
161
+ ... MusicgenConfig,
162
+ ... MusicgenDecoderConfig,
163
+ ... T5Config,
164
+ ... EncodecConfig,
165
+ ... MusicgenForConditionalGeneration,
166
+ ... )
167
+
168
+ >>> # Initializing text encoder, audio encoder, and decoder model configurations
169
+ >>> text_encoder_config = T5Config()
170
+ >>> audio_encoder_config = EncodecConfig()
171
+ >>> decoder_config = MusicgenDecoderConfig()
172
+
173
+ >>> configuration = MusicgenConfig.from_sub_models_config(
174
+ ... text_encoder_config, audio_encoder_config, decoder_config
175
+ ... )
176
+
177
+ >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration
178
+ >>> model = MusicgenForConditionalGeneration(configuration)
179
+
180
+ >>> # Accessing the model configuration
181
+ >>> configuration = model.config
182
+ >>> config_text_encoder = model.config.text_encoder
183
+ >>> config_audio_encoder = model.config.audio_encoder
184
+ >>> config_decoder = model.config.decoder
185
+
186
+ >>> # Saving the model, including its configuration
187
+ >>> model.save_pretrained("musicgen-model")
188
+
189
+ >>> # loading model and config from pretrained folder
190
+ >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model")
191
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config)
192
+ ```"""
193
+
194
+ model_type = "musicgen"
195
+ is_composition = True
196
+
197
+ def __init__(self, **kwargs):
198
+ super().__init__(**kwargs)
199
+ if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs:
200
+ raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config")
201
+
202
+ text_encoder_config = kwargs.pop("text_encoder")
203
+ text_encoder_model_type = text_encoder_config.pop("model_type")
204
+
205
+ audio_encoder_config = kwargs.pop("audio_encoder")
206
+ audio_encoder_model_type = audio_encoder_config.pop("model_type")
207
+
208
+ decoder_config = kwargs.pop("decoder")
209
+
210
+ self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config)
211
+ self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config)
212
+ self.decoder = MusicgenDecoderConfig(**decoder_config)
213
+ self.is_encoder_decoder = True
214
+
215
+ @classmethod
216
+ def from_sub_models_config(
217
+ cls,
218
+ text_encoder_config: PretrainedConfig,
219
+ audio_encoder_config: PretrainedConfig,
220
+ decoder_config: MusicgenDecoderConfig,
221
+ **kwargs,
222
+ ):
223
+ r"""
224
+ Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder
225
+ configurations.
226
+
227
+ Returns:
228
+ [`MusicgenConfig`]: An instance of a configuration object
229
+ """
230
+
231
+ return cls(
232
+ text_encoder=text_encoder_config.to_dict(),
233
+ audio_encoder=audio_encoder_config.to_dict(),
234
+ decoder=decoder_config.to_dict(),
235
+ **kwargs,
236
+ )
237
+
238
+ @property
239
+ # This is a property because you might want to change the codec model on the fly
240
+ def sampling_rate(self):
241
+ return self.audio_encoder.sampling_rate
242
+
243
+ @property
244
+ def _attn_implementation(self):
245
+ # This property is made private for now (as it cannot be changed and a PreTrainedModel.use_attn_implementation method needs to be implemented.)
246
+ if hasattr(self, "_attn_implementation_internal"):
247
+ if self._attn_implementation_internal is None:
248
+ # `config.attn_implementation` should never be None, for backward compatibility.
249
+ return "eager"
250
+ else:
251
+ return self._attn_implementation_internal
252
+ else:
253
+ return "eager"
254
+
255
+ @_attn_implementation.setter
256
+ def _attn_implementation(self, value):
257
+ self._attn_implementation_internal = value
258
+ self.decoder._attn_implementation = value
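Since `MusicgenConfig` is a composition of three sub-configs, the convenience accessors defined above simply delegate: `sampling_rate` reads from the audio encoder config, and setting `_attn_implementation` mirrors the value onto the decoder. A small sketch, reusing the sub-config classes from the docstring example (default values, no pretrained weights):

```python
from transformers import EncodecConfig, MusicgenConfig, MusicgenDecoderConfig, T5Config

config = MusicgenConfig.from_sub_models_config(
    text_encoder_config=T5Config(),
    audio_encoder_config=EncodecConfig(),
    decoder_config=MusicgenDecoderConfig(),
)

# `sampling_rate` is read straight from the audio encoder sub-config.
assert config.sampling_rate == config.audio_encoder.sampling_rate

# Setting the attention implementation also propagates it to the decoder sub-config.
config._attn_implementation = "eager"
assert config.decoder._attn_implementation == "eager"
```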
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright 2024 BigCode and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_starcoder2"] = [
35
+ "Starcoder2ForCausalLM",
36
+ "Starcoder2Model",
37
+ "Starcoder2PreTrainedModel",
38
+ "Starcoder2ForSequenceClassification",
39
+ ]
40
+
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .modeling_starcoder2 import (
52
+ Starcoder2ForCausalLM,
53
+ Starcoder2ForSequenceClassification,
54
+ Starcoder2Model,
55
+ Starcoder2PreTrainedModel,
56
+ )
57
+
58
+
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
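As with the other model packages, this `__init__` only declares an import structure: `_LazyModule` replaces the package in `sys.modules`, and `modeling_starcoder2` (and therefore `torch`) is only resolved the first time one of its symbols is actually requested. A brief sketch of what that lazy behaviour looks like from the outside:

```python
import sys

# Importing the config resolves only `configuration_starcoder2`; the modeling
# module is looked up lazily (and skipped entirely when torch is unavailable).
from transformers.models.starcoder2 import Starcoder2Config

config = Starcoder2Config(num_hidden_layers=2)  # plain-Python config object
print(type(sys.modules["transformers.models.starcoder2"]).__name__)  # _LazyModule
```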
llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (968 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc ADDED
Binary file (6.13 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc ADDED
Binary file (38.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py ADDED
@@ -0,0 +1,148 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Starcoder2 model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class Starcoder2Config(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
30
+ Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b_16k](https://huggingface.co/bigcode/starcoder2-7b_16k) model.
32
+
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 49152):
40
+ Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`Starcoder2Model`]
42
+ hidden_size (`int`, *optional*, defaults to 3072):
43
+ Dimension of the hidden representations.
44
+ intermediate_size (`int`, *optional*, defaults to 12288):
45
+ Dimension of the MLP representations.
46
+ num_hidden_layers (`int`, *optional*, defaults to 30):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 24):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ num_key_value_heads (`int`, *optional*, defaults to 2):
51
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
52
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
53
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
54
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
55
+ by meanpooling all the original heads within that group. For more details checkout [this
56
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `2`.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
58
+ The non-linear activation function (function or string) in the decoder.
59
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
60
+ The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention
61
+ allows sequences of up to 4096*32 tokens.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ norm_epsilon (`float`, *optional*, defaults to 1e-05):
65
+ Epsilon value for the layer norm
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ bos_token_id (`int`, *optional*, defaults to 50256):
70
+ The id of the "beginning-of-sequence" token.
71
+ eos_token_id (`int`, *optional*, defaults to 50256):
72
+ The id of the "end-of-sequence" token.
73
+ rope_theta (`float`, *optional*, defaults to 10000.0):
74
+ The base period of the RoPE embeddings.
75
+ sliding_window (`int`, *optional*):
76
+ Sliding window attention window size. If not specified, will default to `None` (no sliding window).
77
+ attention_dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout ratio for the attention probabilities.
79
+ residual_dropout (`float`, *optional*, defaults to 0.0):
80
+ Residual connection dropout value.
81
+ embedding_dropout (`float`, *optional*, defaults to 0.0):
82
+ Embedding dropout.
83
+ use_bias (`bool`, *optional*, defaults to `True`):
84
+ Whether to use bias term on linear layers of the model.
85
+
86
+
87
+ ```python
88
+ >>> from transformers import Starcoder2Model, Starcoder2Config
89
+
90
+ >>> # Initializing a Starcoder2 7B style configuration
91
+ >>> configuration = Starcoder2Config()
92
+
93
+ >>> # Initializing a model from the Starcoder2 7B style configuration
94
+ >>> model = Starcoder2Model(configuration)
95
+
96
+ >>> # Accessing the model configuration
97
+ >>> configuration = model.config
98
+ ```"""
99
+
100
+ model_type = "starcoder2"
101
+ keys_to_ignore_at_inference = ["past_key_values"]
102
+
103
+ def __init__(
104
+ self,
105
+ vocab_size=49152,
106
+ hidden_size=3072,
107
+ intermediate_size=12288,
108
+ num_hidden_layers=30,
109
+ num_attention_heads=24,
110
+ num_key_value_heads=2,
111
+ hidden_act="gelu_pytorch_tanh",
112
+ max_position_embeddings=4096,
113
+ initializer_range=0.018042,
114
+ norm_epsilon=1e-5,
115
+ use_cache=True,
116
+ bos_token_id=50256,
117
+ eos_token_id=50256,
118
+ rope_theta=10000.0,
119
+ sliding_window=None,
120
+ attention_dropout=0.0,
121
+ residual_dropout=0.0,
122
+ embedding_dropout=0.0,
123
+ use_bias=True,
124
+ **kwargs,
125
+ ):
126
+ self.vocab_size = vocab_size
127
+ self.max_position_embeddings = max_position_embeddings
128
+ self.hidden_size = hidden_size
129
+ self.intermediate_size = intermediate_size
130
+ self.num_hidden_layers = num_hidden_layers
131
+ self.num_attention_heads = num_attention_heads
132
+ self.sliding_window = sliding_window
133
+ self.use_bias = use_bias
134
+ self.num_key_value_heads = num_key_value_heads
135
+ self.hidden_act = hidden_act
136
+ self.initializer_range = initializer_range
137
+ self.norm_epsilon = norm_epsilon
138
+ self.use_cache = use_cache
139
+ self.rope_theta = rope_theta
140
+ self.attention_dropout = attention_dropout
141
+ self.residual_dropout = residual_dropout
142
+ self.embedding_dropout = embedding_dropout
143
+
144
+ super().__init__(
145
+ bos_token_id=bos_token_id,
146
+ eos_token_id=eos_token_id,
147
+ **kwargs,
148
+ )
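With the defaults above (`hidden_size=3072`, `num_attention_heads=24`, `num_key_value_heads=2`), the attention layers in `modeling_starcoder2.py` below run grouped-query attention: the two key/value heads are shared by groups of 24 / 2 = 12 query heads. A small sketch of that head bookkeeping, using only the config defaults:

```python
from transformers import Starcoder2Config

config = Starcoder2Config()  # defaults: hidden_size=3072, 24 heads, 2 KV heads

head_dim = config.hidden_size // config.num_attention_heads                      # 3072 // 24 = 128
num_key_value_groups = config.num_attention_heads // config.num_key_value_heads  # 24 // 2 = 12

# Projection widths mirror Starcoder2Attention: queries keep every head,
# keys/values only keep the (much smaller) number of key/value heads.
q_out = config.num_attention_heads * head_dim   # 3072
kv_out = config.num_key_value_heads * head_dim  # 2 * 128 = 256
print(head_dim, num_key_value_groups, q_out, kv_out)  # 128 12 3072 256
```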
llmeval-env/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py ADDED
@@ -0,0 +1,1378 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Starcoder2 model."""
21
+ import inspect
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+
32
+ from ...activations import ACT2FN
33
+ from ...cache_utils import Cache, DynamicCache
34
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
35
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ is_flash_attn_2_available,
41
+ is_flash_attn_greater_or_equal_2_10,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_starcoder2 import Starcoder2Config
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CONFIG_FOR_DOC = "Starcoder2Config"
58
+
59
+
60
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
61
+ def _get_unpad_data(attention_mask):
62
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
63
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
64
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
65
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
66
+ return (
67
+ indices,
68
+ cu_seqlens,
69
+ max_seqlen_in_batch,
70
+ )
71
+
72
+
73
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Starcoder2
74
+ class Starcoder2RotaryEmbedding(nn.Module):
75
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
76
+ super().__init__()
77
+
78
+ self.dim = dim
79
+ self.max_position_embeddings = max_position_embeddings
80
+ self.base = base
81
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
82
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
83
+
84
+ # Build here to make `torch.jit.trace` work.
85
+ self._set_cos_sin_cache(
86
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
87
+ )
88
+
89
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
90
+ self.max_seq_len_cached = seq_len
91
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
92
+
93
+ freqs = torch.outer(t, self.inv_freq)
94
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
95
+ emb = torch.cat((freqs, freqs), dim=-1)
96
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
97
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
98
+
99
+ def forward(self, x, seq_len=None):
100
+ # x: [bs, num_attention_heads, seq_len, head_size]
101
+ if seq_len > self.max_seq_len_cached:
102
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
103
+
104
+ return (
105
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
106
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
107
+ )
108
+
109
+
110
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
111
+ def rotate_half(x):
112
+ """Rotates half the hidden dims of the input."""
113
+ x1 = x[..., : x.shape[-1] // 2]
114
+ x2 = x[..., x.shape[-1] // 2 :]
115
+ return torch.cat((-x2, x1), dim=-1)
116
+
117
+
118
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
119
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
120
+ """Applies Rotary Position Embedding to the query and key tensors.
121
+
122
+ Args:
123
+ q (`torch.Tensor`): The query tensor.
124
+ k (`torch.Tensor`): The key tensor.
125
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
126
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
127
+ position_ids (`torch.Tensor`):
128
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
129
+ used to pass offsetted position ids when working with a KV-cache.
130
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
131
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
132
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
133
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
134
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
135
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
136
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
137
+ Returns:
138
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
139
+ """
140
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
141
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
142
+ q_embed = (q * cos) + (rotate_half(q) * sin)
143
+ k_embed = (k * cos) + (rotate_half(k) * sin)
144
+ return q_embed, k_embed
145
+
146
+
147
+ class Starcoder2MLP(nn.Module):
148
+ def __init__(self, config: Starcoder2Config):
149
+ super().__init__()
150
+ embed_dim = config.hidden_size
151
+ self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
152
+ self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
153
+ self.act = ACT2FN[config.hidden_act]
154
+ self.residual_dropout = config.residual_dropout
155
+
156
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
157
+ hidden_states = self.c_fc(hidden_states)
158
+ hidden_states = self.act(hidden_states)
159
+ hidden_states = self.c_proj(hidden_states)
160
+ hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
161
+ return hidden_states
162
+
163
+
164
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
165
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
166
+ """
167
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
168
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
169
+ """
170
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
171
+ if n_rep == 1:
172
+ return hidden_states
173
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
174
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
175
+
176
+
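As a quick illustration of the helper above (not part of the original file; the toy shapes are assumptions), `repeat_kv` expands the key/value heads so that grouped-query attention can be computed with ordinary batched matmuls:

```python
import torch

kv = torch.randn(1, 2, 5, 16)           # [batch, num_key_value_heads, seq_len, head_dim]
expanded = repeat_kv(kv, n_rep=4)        # each of the 2 KV heads is shared by 4 query heads
assert expanded.shape == (1, 8, 5, 16)   # [batch, num_attention_heads, seq_len, head_dim]
```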
177
+ class Starcoder2Attention(nn.Module):
178
+ """
179
+ Multi-headed attention from the 'Attention Is All You Need' paper, modified to use sliding window attention as in Longformer
180
+ and "Generating Long Sequences with Sparse Transformers".
181
+ """
182
+
183
+ def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
184
+ super().__init__()
185
+ self.config = config
186
+ self.layer_idx = layer_idx
187
+ if layer_idx is None:
188
+ logger.warning_once(
189
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
190
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
191
+ "when creating this class."
192
+ )
193
+
194
+ self.hidden_size = config.hidden_size
195
+ self.num_heads = config.num_attention_heads
196
+ self.head_dim = self.hidden_size // self.num_heads
197
+ self.num_key_value_heads = config.num_key_value_heads
198
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
199
+ self.max_position_embeddings = config.max_position_embeddings
200
+ self.rope_theta = config.rope_theta
201
+ self.use_bias = config.use_bias
202
+ self.is_causal = True
203
+ self.attention_dropout = config.attention_dropout
204
+ self.residual_dropout = config.residual_dropout
205
+
206
+ if (self.head_dim * self.num_heads) != self.hidden_size:
207
+ raise ValueError(
208
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
209
+ f" and `num_heads`: {self.num_heads})."
210
+ )
211
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=self.use_bias)
212
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.use_bias)
213
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.use_bias)
214
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=self.use_bias)
215
+
216
+ self.rotary_emb = Starcoder2RotaryEmbedding(
217
+ self.head_dim,
218
+ max_position_embeddings=self.max_position_embeddings,
219
+ base=self.rope_theta,
220
+ )
221
+
222
+ def forward(
223
+ self,
224
+ hidden_states: torch.Tensor,
225
+ attention_mask: Optional[torch.Tensor] = None,
226
+ position_ids: Optional[torch.LongTensor] = None,
227
+ past_key_value: Optional[Cache] = None,
228
+ output_attentions: bool = False,
229
+ use_cache: bool = False,
230
+ **kwargs,
231
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
232
+ if "padding_mask" in kwargs:
233
+ warnings.warn(
234
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
235
+ )
236
+ bsz, q_len, _ = hidden_states.size()
237
+
238
+ query_states = self.q_proj(hidden_states)
239
+ key_states = self.k_proj(hidden_states)
240
+ value_states = self.v_proj(hidden_states)
241
+
242
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
243
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
244
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
245
+
246
+ kv_seq_len = key_states.shape[-2]
247
+ if past_key_value is not None:
248
+ if self.layer_idx is None:
249
+ raise ValueError(
250
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
251
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
252
+ "with a layer index."
253
+ )
254
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
255
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
256
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
257
+
258
+ if past_key_value is not None:
259
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
260
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
261
+
262
+ # repeat k/v heads if n_kv_heads < n_heads
263
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
264
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
265
+
266
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
267
+
268
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
269
+ raise ValueError(
270
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
271
+ f" {attn_weights.size()}"
272
+ )
273
+
274
+ if attention_mask is not None:
275
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
276
+ raise ValueError(
277
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
278
+ )
279
+
280
+ attn_weights = attn_weights + attention_mask
281
+
282
+ # upcast attention to fp32
283
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
284
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
285
+ attn_output = torch.matmul(attn_weights, value_states)
286
+
287
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
288
+ raise ValueError(
289
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
290
+ f" {attn_output.size()}"
291
+ )
292
+
293
+ attn_output = attn_output.transpose(1, 2).contiguous()
294
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
295
+
296
+ attn_output = self.o_proj(attn_output)
297
+ attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
298
+
299
+ if not output_attentions:
300
+ attn_weights = None
301
+
302
+ return attn_output, attn_weights, past_key_value
303
+
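For readers skimming the eager path above, the core computation (scaled dot-product scores, additive causal mask, fp32 softmax, weighted sum over values) can be reproduced in isolation. This is a standalone sketch with assumed toy shapes, not code from the model:

```python
import math
import torch

# Toy shapes (assumptions): batch=1, heads=2, q_len=kv_len=3, head_dim=4.
q, k, v = (torch.randn(1, 2, 3, 4) for _ in range(3))
causal_mask = torch.triu(torch.full((3, 3), float("-inf")), diagonal=1)  # additive causal mask

scores = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(4) + causal_mask
weights = torch.nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(q.dtype)
out = torch.matmul(weights, v)           # [1, 2, 3, 4], same layout as attn_output above
```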
304
+
305
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Starcoder2
306
+ class Starcoder2FlashAttention2(Starcoder2Attention):
307
+ """
308
+ Starcoder2 flash attention module. This module inherits from `Starcoder2Attention` as the weights of the module stay
309
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
310
+ flash attention and deal with padding tokens in case the input contains any of them.
311
+ """
312
+
313
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
314
+ def __init__(self, *args, **kwargs):
315
+ super().__init__(*args, **kwargs)
316
+
317
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
318
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
319
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
320
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
321
+
322
+ # Ignore copy
323
+ def forward(
324
+ self,
325
+ hidden_states: torch.Tensor,
326
+ attention_mask: Optional[torch.Tensor] = None,
327
+ position_ids: Optional[torch.LongTensor] = None,
328
+ past_key_value: Optional[Cache] = None,
329
+ output_attentions: bool = False,
330
+ use_cache: bool = False,
331
+ **kwargs,
332
+ ):
333
+ if "padding_mask" in kwargs:
334
+ warnings.warn(
335
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
336
+ )
337
+
338
+ # overwrite attention_mask with padding_mask
339
+ attention_mask = kwargs.pop("padding_mask")
340
+ bsz, q_len, _ = hidden_states.size()
341
+
342
+ query_states = self.q_proj(hidden_states)
343
+ key_states = self.k_proj(hidden_states)
344
+ value_states = self.v_proj(hidden_states)
345
+
346
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
347
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
348
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
349
+
350
+ kv_seq_len = key_states.shape[-2]
351
+ if past_key_value is not None:
352
+ if self.layer_idx is None:
353
+ raise ValueError(
354
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
355
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
356
+ "with a layer index."
357
+ )
358
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
359
+
360
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
361
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
362
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
363
+
364
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
365
+
366
+ use_sliding_windows = (
367
+ _flash_supports_window_size
368
+ and getattr(self.config, "sliding_window", None) is not None
369
+ and kv_seq_len > self.config.sliding_window
370
+ )
371
+
372
+ if not _flash_supports_window_size:
373
+ logger.warning_once(
374
+ "The current flash attention version does not support sliding window attention. For a more memory-efficient implementation,"
375
+ " make sure to upgrade the flash-attn library."
376
+ )
377
+
378
+ if past_key_value is not None:
379
+ # Activate cache slicing only if the config has a `sliding_window` attribute
380
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
381
+ if (
382
+ getattr(self.config, "sliding_window", None) is not None
383
+ and kv_seq_len > self.config.sliding_window
384
+ and cache_has_contents
385
+ ):
386
+ slicing_tokens = 1 - self.config.sliding_window
387
+
388
+ past_key = past_key_value[self.layer_idx][0]
389
+ past_value = past_key_value[self.layer_idx][1]
390
+
391
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
392
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
393
+
394
+ if past_key.shape[-2] != self.config.sliding_window - 1:
395
+ raise ValueError(
396
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
397
+ f" {past_key.shape}"
398
+ )
399
+
400
+ if attention_mask is not None:
401
+ attention_mask = attention_mask[:, slicing_tokens:]
402
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
403
+
404
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
405
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
406
+
407
+ # repeat k/v heads if n_kv_heads < n_heads
408
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
409
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
410
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
411
+
412
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
413
+ # therefore the input hidden states get silently cast in float32. Hence, we need to
414
+ # cast them back in float16 just to be sure everything works as expected.
415
+ input_dtype = query_states.dtype
416
+ if input_dtype == torch.float32:
417
+ if torch.is_autocast_enabled():
418
+ target_dtype = torch.get_autocast_gpu_dtype()
419
+ # Handle the case where the model is quantized
420
+ elif hasattr(self.config, "_pre_quantization_dtype"):
421
+ target_dtype = self.config._pre_quantization_dtype
422
+ else:
423
+ target_dtype = self.q_proj.weight.dtype
424
+
425
+ logger.warning_once(
426
+ f"The input hidden states seem to be silently cast in float32; this might be related to"
427
+ f" the fact you have upcast embedding or layer norm layers in float32. We will cast the input back to"
428
+ f" {target_dtype}."
429
+ )
430
+
431
+ query_states = query_states.to(target_dtype)
432
+ key_states = key_states.to(target_dtype)
433
+ value_states = value_states.to(target_dtype)
434
+
435
+ # Reshape to the expected shape for Flash Attention
436
+ query_states = query_states.transpose(1, 2)
437
+ key_states = key_states.transpose(1, 2)
438
+ value_states = value_states.transpose(1, 2)
439
+
440
+ attn_output = self._flash_attention_forward(
441
+ query_states,
442
+ key_states,
443
+ value_states,
444
+ attention_mask,
445
+ q_len,
446
+ dropout=dropout_rate,
447
+ use_sliding_windows=use_sliding_windows,
448
+ )
449
+
450
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
451
+ attn_output = self.o_proj(attn_output)
452
+ attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
453
+
454
+ if not output_attentions:
455
+ attn_weights = None
456
+
457
+ return attn_output, attn_weights, past_key_value
458
+
459
+ def _flash_attention_forward(
460
+ self,
461
+ query_states,
462
+ key_states,
463
+ value_states,
464
+ attention_mask,
465
+ query_length,
466
+ dropout=0.0,
467
+ softmax_scale=None,
468
+ use_sliding_windows=False,
469
+ ):
470
+ """
471
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
472
+ it first unpads the input, then computes the attention scores, and pads the final attention output.
473
+
474
+ Args:
475
+ query_states (`torch.Tensor`):
476
+ Input query states to be passed to Flash Attention API
477
+ key_states (`torch.Tensor`):
478
+ Input key states to be passed to Flash Attention API
479
+ value_states (`torch.Tensor`):
480
+ Input value states to be passed to Flash Attention API
481
+ attention_mask (`torch.Tensor`):
482
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
483
+ position of padding tokens and 1 for the position of non-padding tokens.
484
+ dropout (`float`):
485
+ Attention dropout
486
+ softmax_scale (`float`, *optional*):
487
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
488
+ use_sliding_windows (`bool`, *optional*):
489
+ Whether to activate sliding window attention.
490
+ """
491
+ if not self._flash_attn_uses_top_left_mask:
492
+ causal = self.is_causal
493
+ else:
494
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
495
+ causal = self.is_causal and query_length != 1
496
+
497
+ # Contains at least one padding token in the sequence
498
+ if attention_mask is not None:
499
+ batch_size = query_states.shape[0]
500
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
501
+ query_states, key_states, value_states, attention_mask, query_length
502
+ )
503
+
504
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
505
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
506
+
507
+ if not use_sliding_windows:
508
+ attn_output_unpad = flash_attn_varlen_func(
509
+ query_states,
510
+ key_states,
511
+ value_states,
512
+ cu_seqlens_q=cu_seqlens_q,
513
+ cu_seqlens_k=cu_seqlens_k,
514
+ max_seqlen_q=max_seqlen_in_batch_q,
515
+ max_seqlen_k=max_seqlen_in_batch_k,
516
+ dropout_p=dropout,
517
+ softmax_scale=softmax_scale,
518
+ causal=causal,
519
+ )
520
+ else:
521
+ attn_output_unpad = flash_attn_varlen_func(
522
+ query_states,
523
+ key_states,
524
+ value_states,
525
+ cu_seqlens_q=cu_seqlens_q,
526
+ cu_seqlens_k=cu_seqlens_k,
527
+ max_seqlen_q=max_seqlen_in_batch_q,
528
+ max_seqlen_k=max_seqlen_in_batch_k,
529
+ dropout_p=dropout,
530
+ softmax_scale=softmax_scale,
531
+ causal=causal,
532
+ window_size=(self.config.sliding_window, self.config.sliding_window),
533
+ )
534
+
535
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
536
+ else:
537
+ if not use_sliding_windows:
538
+ attn_output = flash_attn_func(
539
+ query_states,
540
+ key_states,
541
+ value_states,
542
+ dropout,
543
+ softmax_scale=softmax_scale,
544
+ causal=causal,
545
+ )
546
+ else:
547
+ attn_output = flash_attn_func(
548
+ query_states,
549
+ key_states,
550
+ value_states,
551
+ dropout,
552
+ softmax_scale=softmax_scale,
553
+ causal=causal,
554
+ window_size=(self.config.sliding_window, self.config.sliding_window),
555
+ )
556
+
557
+ return attn_output
558
+
559
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
560
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
561
+
562
+ # On the first iteration we need to properly re-create the padding mask
563
+ # by slicing it at the proper place
564
+ if kv_seq_len != attention_mask.shape[-1]:
565
+ attention_mask_num_tokens = attention_mask.shape[-1]
566
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
567
+
568
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
569
+
570
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
571
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
572
+
573
+ if query_length == kv_seq_len:
574
+ query_layer = index_first_axis(
575
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
576
+ )
577
+ cu_seqlens_q = cu_seqlens_k
578
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
579
+ indices_q = indices_k
580
+ elif query_length == 1:
581
+ max_seqlen_in_batch_q = 1
582
+ cu_seqlens_q = torch.arange(
583
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
584
+ ) # There is a memcpy here, that is very bad.
585
+ indices_q = cu_seqlens_q[:-1]
586
+ query_layer = query_layer.squeeze(1)
587
+ else:
588
+ # The -q_len: slice assumes left padding.
589
+ attention_mask = attention_mask[:, -query_length:]
590
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
591
+
592
+ return (
593
+ query_layer,
594
+ key_layer,
595
+ value_layer,
596
+ indices_q,
597
+ (cu_seqlens_q, cu_seqlens_k),
598
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
599
+ )
600
+
601
+
602
+ # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Starcoder2
603
+ class Starcoder2SdpaAttention(Starcoder2Attention):
604
+ """
605
+ Starcoder2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
606
+ `Starcoder2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
607
+ SDPA API.
608
+ """
609
+
610
+ # Ignore copy
611
+ def forward(
612
+ self,
613
+ hidden_states: torch.Tensor,
614
+ attention_mask: Optional[torch.Tensor] = None,
615
+ position_ids: Optional[torch.LongTensor] = None,
616
+ past_key_value: Optional[Cache] = None,
617
+ output_attentions: bool = False,
618
+ use_cache: bool = False,
619
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
620
+ if output_attentions:
621
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
622
+ logger.warning_once(
623
+ "Starcoder2Model is using Starcoder2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
624
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
625
+ )
626
+ return super().forward(
627
+ hidden_states=hidden_states,
628
+ attention_mask=attention_mask,
629
+ position_ids=position_ids,
630
+ past_key_value=past_key_value,
631
+ output_attentions=output_attentions,
632
+ use_cache=use_cache,
633
+ )
634
+
635
+ bsz, q_len, _ = hidden_states.size()
636
+
637
+ query_states = self.q_proj(hidden_states)
638
+ key_states = self.k_proj(hidden_states)
639
+ value_states = self.v_proj(hidden_states)
640
+
641
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
642
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
643
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
644
+
645
+ kv_seq_len = key_states.shape[-2]
646
+ if past_key_value is not None:
647
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
648
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
649
+
650
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
651
+
652
+ if past_key_value is not None:
653
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
654
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
655
+
656
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
657
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
658
+
659
+ if attention_mask is not None:
660
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
661
+ raise ValueError(
662
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
663
+ )
664
+
665
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
666
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
667
+ if query_states.device.type == "cuda" and attention_mask is not None:
668
+ query_states = query_states.contiguous()
669
+ key_states = key_states.contiguous()
670
+ value_states = value_states.contiguous()
671
+
672
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
673
+ query_states,
674
+ key_states,
675
+ value_states,
676
+ attn_mask=attention_mask,
677
+ dropout_p=self.attention_dropout if self.training else 0.0,
678
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
679
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
680
+ )
681
+
682
+ attn_output = attn_output.transpose(1, 2).contiguous()
683
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
684
+
685
+ attn_output = self.o_proj(attn_output)
686
+ # The difference from Mistral is that Starcoder2 applies residual dropout here
687
+ attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
688
+
689
+ return attn_output, None, past_key_value
690
+
691
+
692
+ STARCODER2_ATTENTION_CLASSES = {
693
+ "eager": Starcoder2Attention,
694
+ "flash_attention_2": Starcoder2FlashAttention2,
695
+ "sdpa": Starcoder2SdpaAttention,
696
+ }
697
+
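The dispatch table above is keyed by `config._attn_implementation`; in practice the backend is usually chosen through the `attn_implementation` argument of `from_pretrained`. A minimal sketch follows (the checkpoint name is taken from the usage example later in this file, and the availability of flash-attn for the third option is an assumption):

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "bigcode/starcoder2-7b_16k",
    attn_implementation="sdpa",  # or "eager" / "flash_attention_2" if flash-attn is installed
)
# The selected class ends up on every decoder layer:
print(type(model.model.layers[0].self_attn).__name__)  # e.g. "Starcoder2SdpaAttention"
```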
698
+
699
+ class Starcoder2DecoderLayer(nn.Module):
700
+ def __init__(self, config: Starcoder2Config, layer_idx: int):
701
+ super().__init__()
702
+ self.hidden_size = config.hidden_size
703
+
704
+ self.self_attn = STARCODER2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
705
+
706
+ self.mlp = Starcoder2MLP(config)
707
+
708
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
709
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
710
+
711
+ # Copied from transformers.models.mistral.modeling_mistral.MistralDecoderLayer.forward
712
+ def forward(
713
+ self,
714
+ hidden_states: torch.Tensor,
715
+ attention_mask: Optional[torch.Tensor] = None,
716
+ position_ids: Optional[torch.LongTensor] = None,
717
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
718
+ output_attentions: Optional[bool] = False,
719
+ use_cache: Optional[bool] = False,
720
+ **kwargs,
721
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
722
+ if "padding_mask" in kwargs:
723
+ warnings.warn(
724
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
725
+ )
726
+ """
727
+ Args:
728
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
729
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
730
+ `(batch, sequence_length)` where padding elements are indicated by 0.
731
+ output_attentions (`bool`, *optional*):
732
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
733
+ returned tensors for more detail.
734
+ use_cache (`bool`, *optional*):
735
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
736
+ (see `past_key_values`).
737
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
738
+ """
739
+
740
+ residual = hidden_states
741
+
742
+ hidden_states = self.input_layernorm(hidden_states)
743
+
744
+ # Self Attention
745
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
746
+ hidden_states=hidden_states,
747
+ attention_mask=attention_mask,
748
+ position_ids=position_ids,
749
+ past_key_value=past_key_value,
750
+ output_attentions=output_attentions,
751
+ use_cache=use_cache,
752
+ )
753
+ hidden_states = residual + hidden_states
754
+
755
+ # Fully Connected
756
+ residual = hidden_states
757
+ hidden_states = self.post_attention_layernorm(hidden_states)
758
+ hidden_states = self.mlp(hidden_states)
759
+ hidden_states = residual + hidden_states
760
+
761
+ outputs = (hidden_states,)
762
+
763
+ if output_attentions:
764
+ outputs += (self_attn_weights,)
765
+
766
+ if use_cache:
767
+ outputs += (present_key_value,)
768
+
769
+ return outputs
770
+
771
+
772
+ STARCODER2_START_DOCSTRING = r"""
773
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
774
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
775
+ etc.)
776
+
777
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
778
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
779
+ and behavior.
780
+
781
+ Parameters:
782
+ config ([`Starcoder2Config`]):
783
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
784
+ load the weights associated with the model, only the configuration. Check out the
785
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
786
+ """
787
+
788
+
789
+ @add_start_docstrings(
790
+ "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.",
791
+ STARCODER2_START_DOCSTRING,
792
+ )
793
+ # Copied from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->Starcoder2
794
+ class Starcoder2PreTrainedModel(PreTrainedModel):
795
+ config_class = Starcoder2Config
796
+ base_model_prefix = "model"
797
+ supports_gradient_checkpointing = True
798
+ _no_split_modules = ["Starcoder2DecoderLayer"]
799
+ _skip_keys_device_placement = "past_key_values"
800
+ _supports_flash_attn_2 = True
801
+ _supports_sdpa = True
802
+ _supports_cache_class = True
803
+
804
+ def _init_weights(self, module):
805
+ std = self.config.initializer_range
806
+ if isinstance(module, nn.Linear):
807
+ module.weight.data.normal_(mean=0.0, std=std)
808
+ if module.bias is not None:
809
+ module.bias.data.zero_()
810
+ elif isinstance(module, nn.Embedding):
811
+ module.weight.data.normal_(mean=0.0, std=std)
812
+ if module.padding_idx is not None:
813
+ module.weight.data[module.padding_idx].zero_()
814
+
815
+
816
+ STARCODER2_INPUTS_DOCSTRING = r"""
817
+ Args:
818
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
819
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
820
+ it.
821
+
822
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
823
+ [`PreTrainedTokenizer.__call__`] for details.
824
+
825
+ [What are input IDs?](../glossary#input-ids)
826
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
827
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
828
+
829
+ - 1 for tokens that are **not masked**,
830
+ - 0 for tokens that are **masked**.
831
+
832
+ [What are attention masks?](../glossary#attention-mask)
833
+
834
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
835
+ [`PreTrainedTokenizer.__call__`] for details.
836
+
837
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
838
+ `past_key_values`).
839
+
840
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
841
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
842
+ information on the default strategy.
843
+
844
+ - 1 indicates the head is **not masked**,
845
+ - 0 indicates the head is **masked**.
846
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
847
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
848
+ config.n_positions - 1]`.
849
+
850
+ [What are position IDs?](../glossary#position-ids)
851
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
852
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
853
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
854
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
855
+
856
+ Two formats are allowed:
857
+ - a [`~cache_utils.Cache`] instance;
858
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
859
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
860
+ cache format.
861
+
862
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
863
+ legacy cache format will be returned.
864
+
865
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
866
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
867
+ of shape `(batch_size, sequence_length)`.
868
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
869
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
870
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
871
+ model's internal embedding lookup matrix.
872
+ use_cache (`bool`, *optional*):
873
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
874
+ `past_key_values`).
875
+ output_attentions (`bool`, *optional*):
876
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
877
+ tensors for more detail.
878
+ output_hidden_states (`bool`, *optional*):
879
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
880
+ more detail.
881
+ return_dict (`bool`, *optional*):
882
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
883
+ """
884
+
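The `past_key_values` section of the docstring above mentions two accepted cache formats. Below is a small sketch of converting between them with `DynamicCache` (the same helpers the model uses internally); the toy tensor shapes are assumptions for illustration only.

```python
import torch
from transformers import DynamicCache

# Legacy format: tuple of (key, value) pairs, one per layer.
legacy = tuple(
    (torch.zeros(1, 4, 3, 16), torch.zeros(1, 4, 3, 16))  # [batch, num_heads, seq_len, head_dim]
    for _ in range(2)
)
cache = DynamicCache.from_legacy_cache(legacy)  # Cache instance the model accepts directly
roundtrip = cache.to_legacy_cache()             # back to tuple(tuple(torch.FloatTensor))
```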
885
+
886
+ @add_start_docstrings(
887
+ "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.",
888
+ STARCODER2_START_DOCSTRING,
889
+ )
890
+ class Starcoder2Model(Starcoder2PreTrainedModel):
891
+ """
892
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Starcoder2DecoderLayer`]
893
+
894
+ Args:
895
+ config: Starcoder2Config
896
+ """
897
+
898
+ def __init__(self, config: Starcoder2Config):
899
+ super().__init__(config)
900
+ self.padding_idx = config.pad_token_id
901
+ self.vocab_size = config.vocab_size
902
+
903
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
904
+ self.embedding_dropout = config.embedding_dropout
905
+ self.layers = nn.ModuleList(
906
+ [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
907
+ )
908
+ self._attn_implementation = config._attn_implementation
909
+ self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
910
+ self.gradient_checkpointing = False
911
+ # Initialize weights and apply final processing
912
+ self.post_init()
913
+
914
+ def get_input_embeddings(self):
915
+ return self.embed_tokens
916
+
917
+ def set_input_embeddings(self, value):
918
+ self.embed_tokens = value
919
+
920
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
921
+ def forward(
922
+ self,
923
+ input_ids: torch.LongTensor = None,
924
+ attention_mask: Optional[torch.Tensor] = None,
925
+ position_ids: Optional[torch.LongTensor] = None,
926
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
927
+ inputs_embeds: Optional[torch.FloatTensor] = None,
928
+ use_cache: Optional[bool] = None,
929
+ output_attentions: Optional[bool] = None,
930
+ output_hidden_states: Optional[bool] = None,
931
+ return_dict: Optional[bool] = None,
932
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
933
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
934
+ output_hidden_states = (
935
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
936
+ )
937
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
938
+
939
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
940
+
941
+ # retrieve input_ids and inputs_embeds
942
+ if input_ids is not None and inputs_embeds is not None:
943
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
944
+ elif input_ids is not None:
945
+ batch_size, seq_length = input_ids.shape
946
+ elif inputs_embeds is not None:
947
+ batch_size, seq_length, _ = inputs_embeds.shape
948
+ else:
949
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
950
+
951
+ if self.gradient_checkpointing and self.training:
952
+ if use_cache:
953
+ logger.warning_once(
954
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
955
+ )
956
+ use_cache = False
957
+
958
+ past_key_values_length = 0
959
+
960
+ if use_cache:
961
+ use_legacy_cache = not isinstance(past_key_values, Cache)
962
+ if use_legacy_cache:
963
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
964
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
965
+
966
+ if position_ids is None:
967
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
968
+ position_ids = torch.arange(
969
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
970
+ )
971
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
972
+ else:
973
+ position_ids = position_ids.view(-1, seq_length).long()
974
+
975
+ if inputs_embeds is None:
976
+ inputs_embeds = self.embed_tokens(input_ids)
977
+
978
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
979
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
980
+ if is_padding_right:
981
+ raise ValueError(
982
+ "You are attempting to perform batched generation with padding_side='right'"
983
+ ", which may lead to unexpected behaviour with the Flash Attention version of Starcoder2. Make sure to"
984
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
985
+ )
986
+
987
+ if self._attn_implementation == "flash_attention_2":
988
+ # 2d mask is passed through the layers
989
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
990
+ elif self._attn_implementation == "sdpa" and not output_attentions:
991
+ # output_attentions=True cannot be supported when using SDPA, so we fall back on
992
+ # the manual implementation that requires a 4D causal mask in all cases.
993
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
994
+ attention_mask,
995
+ (batch_size, seq_length),
996
+ inputs_embeds,
997
+ past_key_values_length,
998
+ sliding_window=self.config.sliding_window,
999
+ )
1000
+ else:
1001
+ # 4d mask is passed through the layers
1002
+ attention_mask = _prepare_4d_causal_attention_mask(
1003
+ attention_mask,
1004
+ (batch_size, seq_length),
1005
+ inputs_embeds,
1006
+ past_key_values_length,
1007
+ sliding_window=self.config.sliding_window,
1008
+ )
1009
+
1010
+ hidden_states = inputs_embeds
1011
+ hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training)
1012
+
1013
+ # decoder layers
1014
+ all_hidden_states = () if output_hidden_states else None
1015
+ all_self_attns = () if output_attentions else None
1016
+ next_decoder_cache = None
1017
+
1018
+ for decoder_layer in self.layers:
1019
+ if output_hidden_states:
1020
+ all_hidden_states += (hidden_states,)
1021
+
1022
+ if self.gradient_checkpointing and self.training:
1023
+ layer_outputs = self._gradient_checkpointing_func(
1024
+ decoder_layer.__call__,
1025
+ hidden_states,
1026
+ attention_mask,
1027
+ position_ids,
1028
+ past_key_values,
1029
+ output_attentions,
1030
+ use_cache,
1031
+ )
1032
+ else:
1033
+ layer_outputs = decoder_layer(
1034
+ hidden_states,
1035
+ attention_mask=attention_mask,
1036
+ position_ids=position_ids,
1037
+ past_key_value=past_key_values,
1038
+ output_attentions=output_attentions,
1039
+ use_cache=use_cache,
1040
+ )
1041
+
1042
+ hidden_states = layer_outputs[0]
1043
+
1044
+ if use_cache:
1045
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1046
+
1047
+ if output_attentions:
1048
+ all_self_attns += (layer_outputs[1],)
1049
+
1050
+ hidden_states = self.norm(hidden_states)
1051
+
1052
+ # add hidden states from the last decoder layer
1053
+ if output_hidden_states:
1054
+ all_hidden_states += (hidden_states,)
1055
+
1056
+ next_cache = None
1057
+ if use_cache:
1058
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1059
+
1060
+ if not return_dict:
1061
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1062
+ return BaseModelOutputWithPast(
1063
+ last_hidden_state=hidden_states,
1064
+ past_key_values=next_cache,
1065
+ hidden_states=all_hidden_states,
1066
+ attentions=all_self_attns,
1067
+ )
1068
+
1069
+
1070
+ # Copied from transformers.models.mistral.modeling_mistral.MistralForCausalLM with MISTRAL->STARCODER2,Mistral-7B-v0.1->starcoder2-7b_16k,Mistral->Starcoder2,mistralai->bigcode
1071
+ class Starcoder2ForCausalLM(Starcoder2PreTrainedModel):
1072
+ _tied_weights_keys = ["lm_head.weight"]
1073
+
1074
+ def __init__(self, config):
1075
+ super().__init__(config)
1076
+ self.model = Starcoder2Model(config)
1077
+ self.vocab_size = config.vocab_size
1078
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1079
+
1080
+ # Initialize weights and apply final processing
1081
+ self.post_init()
1082
+
1083
+ def get_input_embeddings(self):
1084
+ return self.model.embed_tokens
1085
+
1086
+ def set_input_embeddings(self, value):
1087
+ self.model.embed_tokens = value
1088
+
1089
+ def get_output_embeddings(self):
1090
+ return self.lm_head
1091
+
1092
+ def set_output_embeddings(self, new_embeddings):
1093
+ self.lm_head = new_embeddings
1094
+
1095
+ def set_decoder(self, decoder):
1096
+ self.model = decoder
1097
+
1098
+ def get_decoder(self):
1099
+ return self.model
1100
+
1101
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
1102
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1103
+ def forward(
1104
+ self,
1105
+ input_ids: torch.LongTensor = None,
1106
+ attention_mask: Optional[torch.Tensor] = None,
1107
+ position_ids: Optional[torch.LongTensor] = None,
1108
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1109
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1110
+ labels: Optional[torch.LongTensor] = None,
1111
+ use_cache: Optional[bool] = None,
1112
+ output_attentions: Optional[bool] = None,
1113
+ output_hidden_states: Optional[bool] = None,
1114
+ return_dict: Optional[bool] = None,
1115
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1116
+ r"""
1117
+ Args:
1118
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1119
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1120
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1121
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1122
+
1123
+ Returns:
1124
+
1125
+ Example:
1126
+
1127
+ ```python
1128
+ >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM
1129
+
1130
+ >>> model = Starcoder2ForCausalLM.from_pretrained("bigcode/starcoder2-7b_16k")
1131
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-7b_16k")
1132
+
1133
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1134
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1135
+
1136
+ >>> # Generate
1137
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1138
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1139
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1140
+ ```"""
1141
+
1142
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1143
+ output_hidden_states = (
1144
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1145
+ )
1146
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1147
+
1148
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1149
+ outputs = self.model(
1150
+ input_ids=input_ids,
1151
+ attention_mask=attention_mask,
1152
+ position_ids=position_ids,
1153
+ past_key_values=past_key_values,
1154
+ inputs_embeds=inputs_embeds,
1155
+ use_cache=use_cache,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ )
1160
+
1161
+ hidden_states = outputs[0]
1162
+ logits = self.lm_head(hidden_states)
1163
+ logits = logits.float()
1164
+
1165
+ loss = None
1166
+ if labels is not None:
1167
+ # Shift so that tokens < n predict n
1168
+ shift_logits = logits[..., :-1, :].contiguous()
1169
+ shift_labels = labels[..., 1:].contiguous()
1170
+ # Flatten the tokens
1171
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1172
+ shift_labels = shift_labels.view(-1)
1173
+ # Ensure tensors are on the same device
1174
+ shift_labels = shift_labels.to(shift_logits.device)
1175
+ loss_fct = CrossEntropyLoss()
1176
+ loss = loss_fct(shift_logits, shift_labels)
1177
+
1178
+ if not return_dict:
1179
+ output = (logits,) + outputs[1:]
1180
+ return (loss,) + output if loss is not None else output
1181
+
1182
+ return CausalLMOutputWithPast(
1183
+ loss=loss,
1184
+ logits=logits,
1185
+ past_key_values=outputs.past_key_values,
1186
+ hidden_states=outputs.hidden_states,
1187
+ attentions=outputs.attentions,
1188
+ )
1189
+
1190
+ def prepare_inputs_for_generation(
1191
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1192
+ ):
1193
+ # Omit tokens covered by past_key_values
1194
+ if past_key_values is not None:
1195
+ if isinstance(past_key_values, Cache):
1196
+ cache_length = past_key_values.get_seq_length()
1197
+ past_length = past_key_values.seen_tokens
1198
+ max_cache_length = past_key_values.get_max_length()
1199
+ else:
1200
+ cache_length = past_length = past_key_values[0][0].shape[2]
1201
+ max_cache_length = None
1202
+
1203
+ # Keep only the unprocessed tokens:
1204
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1205
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
1206
+ # input)
1207
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1208
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1209
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1210
+ # input_ids based on the past_length.
1211
+ elif past_length < input_ids.shape[1]:
1212
+ input_ids = input_ids[:, past_length:]
1213
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1214
+
1215
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1216
+ if (
1217
+ max_cache_length is not None
1218
+ and attention_mask is not None
1219
+ and cache_length + input_ids.shape[1] > max_cache_length
1220
+ ):
1221
+ attention_mask = attention_mask[:, -max_cache_length:]
1222
+
1223
+ position_ids = kwargs.get("position_ids", None)
1224
+ if attention_mask is not None and position_ids is None:
1225
+ # create position_ids on the fly for batch generation
1226
+ position_ids = attention_mask.long().cumsum(-1) - 1
1227
+ position_ids.masked_fill_(attention_mask == 0, 1)
1228
+ if past_key_values:
1229
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1230
+
1231
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1232
+ if inputs_embeds is not None and past_key_values is None:
1233
+ model_inputs = {"inputs_embeds": inputs_embeds}
1234
+ else:
1235
+ model_inputs = {"input_ids": input_ids}
1236
+
1237
+ model_inputs.update(
1238
+ {
1239
+ "position_ids": position_ids,
1240
+ "past_key_values": past_key_values,
1241
+ "use_cache": kwargs.get("use_cache"),
1242
+ "attention_mask": attention_mask,
1243
+ }
1244
+ )
1245
+ return model_inputs
1246
+
1247
+ @staticmethod
1248
+ def _reorder_cache(past_key_values, beam_idx):
1249
+ reordered_past = ()
1250
+ for layer_past in past_key_values:
1251
+ reordered_past += (
1252
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1253
+ )
1254
+ return reordered_past
1255
+
1256
+
1257
+ @add_start_docstrings(
1258
+ """
1259
+ The Starcoder2 Model transformer with a sequence classification head on top (linear layer).
1260
+
1261
+ [`Starcoder2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1262
+ (e.g. GPT-2) do.
1263
+
1264
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1265
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1266
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1267
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1268
+ each row of the batch).
1269
+ """,
1270
+ STARCODER2_START_DOCSTRING,
1271
+ )
1272
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Starcoder2, LLAMA->STARCODER2
1273
+ class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel):
1274
+ def __init__(self, config):
1275
+ super().__init__(config)
1276
+ self.num_labels = config.num_labels
1277
+ self.model = Starcoder2Model(config)
1278
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1279
+
1280
+ # Initialize weights and apply final processing
1281
+ self.post_init()
1282
+
1283
+ def get_input_embeddings(self):
1284
+ return self.model.embed_tokens
1285
+
1286
+ def set_input_embeddings(self, value):
1287
+ self.model.embed_tokens = value
1288
+
1289
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
1290
+ def forward(
1291
+ self,
1292
+ input_ids: torch.LongTensor = None,
1293
+ attention_mask: Optional[torch.Tensor] = None,
1294
+ position_ids: Optional[torch.LongTensor] = None,
1295
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1296
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1297
+ labels: Optional[torch.LongTensor] = None,
1298
+ use_cache: Optional[bool] = None,
1299
+ output_attentions: Optional[bool] = None,
1300
+ output_hidden_states: Optional[bool] = None,
1301
+ return_dict: Optional[bool] = None,
1302
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1303
+ r"""
1304
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1305
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1306
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1307
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1308
+ """
1309
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1310
+
1311
+ transformer_outputs = self.model(
1312
+ input_ids,
1313
+ attention_mask=attention_mask,
1314
+ position_ids=position_ids,
1315
+ past_key_values=past_key_values,
1316
+ inputs_embeds=inputs_embeds,
1317
+ use_cache=use_cache,
1318
+ output_attentions=output_attentions,
1319
+ output_hidden_states=output_hidden_states,
1320
+ return_dict=return_dict,
1321
+ )
1322
+ hidden_states = transformer_outputs[0]
1323
+ logits = self.score(hidden_states)
1324
+
1325
+ if input_ids is not None:
1326
+ batch_size = input_ids.shape[0]
1327
+ else:
1328
+ batch_size = inputs_embeds.shape[0]
1329
+
1330
+ if self.config.pad_token_id is None and batch_size != 1:
1331
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1332
+ if self.config.pad_token_id is None:
1333
+ sequence_lengths = -1
1334
+ else:
1335
+ if input_ids is not None:
1336
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1337
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1338
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1339
+ sequence_lengths = sequence_lengths.to(logits.device)
1340
+ else:
1341
+ sequence_lengths = -1
1342
+
1343
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1344
+
1345
+ loss = None
1346
+ if labels is not None:
1347
+ labels = labels.to(logits.device)
1348
+ if self.config.problem_type is None:
1349
+ if self.num_labels == 1:
1350
+ self.config.problem_type = "regression"
1351
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1352
+ self.config.problem_type = "single_label_classification"
1353
+ else:
1354
+ self.config.problem_type = "multi_label_classification"
1355
+
1356
+ if self.config.problem_type == "regression":
1357
+ loss_fct = MSELoss()
1358
+ if self.num_labels == 1:
1359
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1360
+ else:
1361
+ loss = loss_fct(pooled_logits, labels)
1362
+ elif self.config.problem_type == "single_label_classification":
1363
+ loss_fct = CrossEntropyLoss()
1364
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1365
+ elif self.config.problem_type == "multi_label_classification":
1366
+ loss_fct = BCEWithLogitsLoss()
1367
+ loss = loss_fct(pooled_logits, labels)
1368
+ if not return_dict:
1369
+ output = (pooled_logits,) + transformer_outputs[1:]
1370
+ return ((loss,) + output) if loss is not None else output
1371
+
1372
+ return SequenceClassifierOutputWithPast(
1373
+ loss=loss,
1374
+ logits=pooled_logits,
1375
+ past_key_values=transformer_outputs.past_key_values,
1376
+ hidden_states=transformer_outputs.hidden_states,
1377
+ attentions=transformer_outputs.attentions,
1378
+ )
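To make the last-token pooling in `Starcoder2ForSequenceClassification` above concrete, here is a standalone sketch of the `sequence_lengths` trick with toy inputs (all values and shapes are assumptions, not taken from the model):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 9, 0, 0], [3, 4, 6, 8, 2]])  # batch of 2, right-padded
logits = torch.randn(2, 5, 3)                                  # [batch, seq_len, num_labels]

# Index of the last non-pad token per row; the modulo mirrors the ONNX-friendly code above.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]       # row 0 -> 2, row 1 -> 4
pooled_logits = logits[torch.arange(2), sequence_lengths]       # [batch, num_labels]
```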
llmeval-env/lib/python3.10/site-packages/transformers/models/tapas/__init__.py ADDED
@@ -0,0 +1,95 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
22
+ "tokenization_tapas": ["TapasTokenizer"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_tapas"] = [
32
+ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "TapasForMaskedLM",
34
+ "TapasForQuestionAnswering",
35
+ "TapasForSequenceClassification",
36
+ "TapasModel",
37
+ "TapasPreTrainedModel",
38
+ "load_tf_weights_in_tapas",
39
+ ]
40
+ try:
41
+ if not is_tf_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_tf_tapas"] = [
47
+ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "TFTapasForMaskedLM",
49
+ "TFTapasForQuestionAnswering",
50
+ "TFTapasForSequenceClassification",
51
+ "TFTapasModel",
52
+ "TFTapasPreTrainedModel",
53
+ ]
54
+
55
+
56
+ if TYPE_CHECKING:
57
+ from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
58
+ from .tokenization_tapas import TapasTokenizer
59
+
60
+ try:
61
+ if not is_torch_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .modeling_tapas import (
67
+ TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
68
+ TapasForMaskedLM,
69
+ TapasForQuestionAnswering,
70
+ TapasForSequenceClassification,
71
+ TapasModel,
72
+ TapasPreTrainedModel,
73
+ load_tf_weights_in_tapas,
74
+ )
75
+
76
+ try:
77
+ if not is_tf_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .modeling_tf_tapas import (
83
+ TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
84
+ TFTapasForMaskedLM,
85
+ TFTapasForQuestionAnswering,
86
+ TFTapasForSequenceClassification,
87
+ TFTapasModel,
88
+ TFTapasPreTrainedModel,
89
+ )
90
+
91
+
92
+ else:
93
+ import sys
94
+
95
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)