applied-ai-018 commited on
Commit
cde5cd1
·
verified ·
1 Parent(s): a6860c3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py +65 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +124 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py +279 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +236 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +613 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez.py +287 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__init__.py +157 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/configuration_gpt2.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/convert_gpt2_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_flax_gpt2.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_gpt2.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_tf_gpt2.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_fast.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_tf.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/configuration_gpt2.py +272 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py +69 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/modeling_flax_gpt2.py +779 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py +1944 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/modeling_tf_gpt2.py +1238 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py +345 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_fast.py +156 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_tf.py +104 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__init__.py +80 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/configuration_gpt_neox.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/modeling_gpt_neox.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/tokenization_gpt_neox_fast.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/configuration_gpt_neox.py +179 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py +1426 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py +243 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py +67 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py +200 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py +1170 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py +800 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py +130 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_audio_spectrogram_transformer": [
21
+ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "ASTConfig",
23
+ ],
24
+ "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"],
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_audio_spectrogram_transformer"] = [
34
+ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "ASTForAudioClassification",
36
+ "ASTModel",
37
+ "ASTPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_audio_spectrogram_transformer import (
43
+ AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
44
+ ASTConfig,
45
+ )
46
+ from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_audio_spectrogram_transformer import (
55
+ AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ ASTForAudioClassification,
57
+ ASTModel,
58
+ ASTPreTrainedModel,
59
+ )
60
+
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc ADDED
Binary file (4.92 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc ADDED
Binary file (7.59 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc ADDED
Binary file (8.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Audio Spectogram Transformer (AST) model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class ASTConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST
31
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
32
+ defaults will yield a similar configuration to that of the AST
33
+ [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
34
+ architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ hidden_size (`int`, *optional*, defaults to 768):
41
+ Dimensionality of the encoder layers and the pooler layer.
42
+ num_hidden_layers (`int`, *optional*, defaults to 12):
43
+ Number of hidden layers in the Transformer encoder.
44
+ num_attention_heads (`int`, *optional*, defaults to 12):
45
+ Number of attention heads for each attention layer in the Transformer encoder.
46
+ intermediate_size (`int`, *optional*, defaults to 3072):
47
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
48
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
49
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
50
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
51
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
52
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
53
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
54
+ The dropout ratio for the attention probabilities.
55
+ initializer_range (`float`, *optional*, defaults to 0.02):
56
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
57
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
58
+ The epsilon used by the layer normalization layers.
59
+ patch_size (`int`, *optional*, defaults to 16):
60
+ The size (resolution) of each patch.
61
+ qkv_bias (`bool`, *optional*, defaults to `True`):
62
+ Whether to add a bias to the queries, keys and values.
63
+ frequency_stride (`int`, *optional*, defaults to 10):
64
+ Frequency stride to use when patchifying the spectrograms.
65
+ time_stride (`int`, *optional*, defaults to 10):
66
+ Temporal stride to use when patchifying the spectrograms.
67
+ max_length (`int`, *optional*, defaults to 1024):
68
+ Temporal dimension of the spectrograms.
69
+ num_mel_bins (`int`, *optional*, defaults to 128):
70
+ Frequency dimension of the spectrograms (number of Mel-frequency bins).
71
+
72
+ Example:
73
+
74
+ ```python
75
+ >>> from transformers import ASTConfig, ASTModel
76
+
77
+ >>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
78
+ >>> configuration = ASTConfig()
79
+
80
+ >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
81
+ >>> model = ASTModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "audio-spectrogram-transformer"
88
+
89
+ def __init__(
90
+ self,
91
+ hidden_size=768,
92
+ num_hidden_layers=12,
93
+ num_attention_heads=12,
94
+ intermediate_size=3072,
95
+ hidden_act="gelu",
96
+ hidden_dropout_prob=0.0,
97
+ attention_probs_dropout_prob=0.0,
98
+ initializer_range=0.02,
99
+ layer_norm_eps=1e-12,
100
+ patch_size=16,
101
+ qkv_bias=True,
102
+ frequency_stride=10,
103
+ time_stride=10,
104
+ max_length=1024,
105
+ num_mel_bins=128,
106
+ **kwargs,
107
+ ):
108
+ super().__init__(**kwargs)
109
+
110
+ self.hidden_size = hidden_size
111
+ self.num_hidden_layers = num_hidden_layers
112
+ self.num_attention_heads = num_attention_heads
113
+ self.intermediate_size = intermediate_size
114
+ self.hidden_act = hidden_act
115
+ self.hidden_dropout_prob = hidden_dropout_prob
116
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
117
+ self.initializer_range = initializer_range
118
+ self.layer_norm_eps = layer_norm_eps
119
+ self.patch_size = patch_size
120
+ self.qkv_bias = qkv_bias
121
+ self.frequency_stride = frequency_stride
122
+ self.time_stride = time_stride
123
+ self.max_length = max_length
124
+ self.num_mel_bins = num_mel_bins
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast"""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import torch
23
+ import torchaudio
24
+ from datasets import load_dataset
25
+ from huggingface_hub import hf_hub_download
26
+
27
+ from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def get_audio_spectrogram_transformer_config(model_name):
36
+ config = ASTConfig()
37
+
38
+ if "10-10" in model_name:
39
+ pass
40
+ elif "speech-commands" in model_name:
41
+ config.max_length = 128
42
+ elif "12-12" in model_name:
43
+ config.time_stride = 12
44
+ config.frequency_stride = 12
45
+ elif "14-14" in model_name:
46
+ config.time_stride = 14
47
+ config.frequency_stride = 14
48
+ elif "16-16" in model_name:
49
+ config.time_stride = 16
50
+ config.frequency_stride = 16
51
+ else:
52
+ raise ValueError("Model not supported")
53
+
54
+ repo_id = "huggingface/label-files"
55
+ if "speech-commands" in model_name:
56
+ config.num_labels = 35
57
+ filename = "speech-commands-v2-id2label.json"
58
+ else:
59
+ config.num_labels = 527
60
+ filename = "audioset-id2label.json"
61
+
62
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
63
+ id2label = {int(k): v for k, v in id2label.items()}
64
+ config.id2label = id2label
65
+ config.label2id = {v: k for k, v in id2label.items()}
66
+
67
+ return config
68
+
69
+
70
+ def rename_key(name):
71
+ if "module.v" in name:
72
+ name = name.replace("module.v", "audio_spectrogram_transformer")
73
+ if "cls_token" in name:
74
+ name = name.replace("cls_token", "embeddings.cls_token")
75
+ if "dist_token" in name:
76
+ name = name.replace("dist_token", "embeddings.distillation_token")
77
+ if "pos_embed" in name:
78
+ name = name.replace("pos_embed", "embeddings.position_embeddings")
79
+ if "patch_embed.proj" in name:
80
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
81
+ # transformer blocks
82
+ if "blocks" in name:
83
+ name = name.replace("blocks", "encoder.layer")
84
+ if "attn.proj" in name:
85
+ name = name.replace("attn.proj", "attention.output.dense")
86
+ if "attn" in name:
87
+ name = name.replace("attn", "attention.self")
88
+ if "norm1" in name:
89
+ name = name.replace("norm1", "layernorm_before")
90
+ if "norm2" in name:
91
+ name = name.replace("norm2", "layernorm_after")
92
+ if "mlp.fc1" in name:
93
+ name = name.replace("mlp.fc1", "intermediate.dense")
94
+ if "mlp.fc2" in name:
95
+ name = name.replace("mlp.fc2", "output.dense")
96
+ # final layernorm
97
+ if "audio_spectrogram_transformer.norm" in name:
98
+ name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
99
+ # classifier head
100
+ if "module.mlp_head.0" in name:
101
+ name = name.replace("module.mlp_head.0", "classifier.layernorm")
102
+ if "module.mlp_head.1" in name:
103
+ name = name.replace("module.mlp_head.1", "classifier.dense")
104
+
105
+ return name
106
+
107
+
108
+ def convert_state_dict(orig_state_dict, config):
109
+ for key in orig_state_dict.copy().keys():
110
+ val = orig_state_dict.pop(key)
111
+
112
+ if "qkv" in key:
113
+ key_split = key.split(".")
114
+ layer_num = int(key_split[3])
115
+ dim = config.hidden_size
116
+ if "weight" in key:
117
+ orig_state_dict[
118
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
119
+ ] = val[:dim, :]
120
+ orig_state_dict[
121
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
122
+ ] = val[dim : dim * 2, :]
123
+ orig_state_dict[
124
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
125
+ ] = val[-dim:, :]
126
+ else:
127
+ orig_state_dict[
128
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
129
+ ] = val[:dim]
130
+ orig_state_dict[
131
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
132
+ ] = val[dim : dim * 2]
133
+ orig_state_dict[
134
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
135
+ ] = val[-dim:]
136
+ else:
137
+ orig_state_dict[rename_key(key)] = val
138
+
139
+ return orig_state_dict
140
+
141
+
142
+ def remove_keys(state_dict):
143
+ ignore_keys = [
144
+ "module.v.head.weight",
145
+ "module.v.head.bias",
146
+ "module.v.head_dist.weight",
147
+ "module.v.head_dist.bias",
148
+ ]
149
+ for k in ignore_keys:
150
+ state_dict.pop(k, None)
151
+
152
+
153
+ @torch.no_grad()
154
+ def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
155
+ """
156
+ Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure.
157
+ """
158
+ config = get_audio_spectrogram_transformer_config(model_name)
159
+
160
+ model_name_to_url = {
161
+ "ast-finetuned-audioset-10-10-0.4593": (
162
+ "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
163
+ ),
164
+ "ast-finetuned-audioset-10-10-0.450": (
165
+ "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
166
+ ),
167
+ "ast-finetuned-audioset-10-10-0.448": (
168
+ "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
169
+ ),
170
+ "ast-finetuned-audioset-10-10-0.448-v2": (
171
+ "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
172
+ ),
173
+ "ast-finetuned-audioset-12-12-0.447": (
174
+ "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
175
+ ),
176
+ "ast-finetuned-audioset-14-14-0.443": (
177
+ "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
178
+ ),
179
+ "ast-finetuned-audioset-16-16-0.442": (
180
+ "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
181
+ ),
182
+ "ast-finetuned-speech-commands-v2": (
183
+ "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
184
+ ),
185
+ }
186
+
187
+ # load original state_dict
188
+ checkpoint_url = model_name_to_url[model_name]
189
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
190
+ # remove some keys
191
+ remove_keys(state_dict)
192
+ # rename some keys
193
+ new_state_dict = convert_state_dict(state_dict, config)
194
+
195
+ # load 🤗 model
196
+ model = ASTForAudioClassification(config)
197
+ model.eval()
198
+
199
+ model.load_state_dict(new_state_dict)
200
+
201
+ # verify outputs on dummy input
202
+ # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
203
+ mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
204
+ std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
205
+ max_length = 1024 if "speech-commands" not in model_name else 128
206
+ feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
207
+
208
+ if "speech-commands" in model_name:
209
+ dataset = load_dataset("speech_commands", "v0.02", split="validation")
210
+ waveform = dataset[0]["audio"]["array"]
211
+ else:
212
+ filepath = hf_hub_download(
213
+ repo_id="nielsr/audio-spectogram-transformer-checkpoint",
214
+ filename="sample_audio.flac",
215
+ repo_type="dataset",
216
+ )
217
+
218
+ waveform, _ = torchaudio.load(filepath)
219
+ waveform = waveform.squeeze().numpy()
220
+
221
+ inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
222
+
223
+ # forward pass
224
+ outputs = model(**inputs)
225
+ logits = outputs.logits
226
+
227
+ if model_name == "ast-finetuned-audioset-10-10-0.4593":
228
+ expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
229
+ elif model_name == "ast-finetuned-audioset-10-10-0.450":
230
+ expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
231
+ elif model_name == "ast-finetuned-audioset-10-10-0.448":
232
+ expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
233
+ elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
234
+ expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
235
+ elif model_name == "ast-finetuned-audioset-12-12-0.447":
236
+ expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
237
+ elif model_name == "ast-finetuned-audioset-14-14-0.443":
238
+ expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
239
+ elif model_name == "ast-finetuned-audioset-16-16-0.442":
240
+ expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
241
+ elif model_name == "ast-finetuned-speech-commands-v2":
242
+ expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
243
+ else:
244
+ raise ValueError("Unknown model name")
245
+ if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
246
+ raise ValueError("Logits don't match")
247
+ print("Looks ok!")
248
+
249
+ if pytorch_dump_folder_path is not None:
250
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
251
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
252
+ model.save_pretrained(pytorch_dump_folder_path)
253
+ print(f"Saving feature extractor to {pytorch_dump_folder_path}")
254
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
255
+
256
+ if push_to_hub:
257
+ print("Pushing model and feature extractor to the hub...")
258
+ model.push_to_hub(f"MIT/{model_name}")
259
+ feature_extractor.push_to_hub(f"MIT/{model_name}")
260
+
261
+
262
+ if __name__ == "__main__":
263
+ parser = argparse.ArgumentParser()
264
+ # Required parameters
265
+ parser.add_argument(
266
+ "--model_name",
267
+ default="ast-finetuned-audioset-10-10-0.4593",
268
+ type=str,
269
+ help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
270
+ )
271
+ parser.add_argument(
272
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
273
+ )
274
+ parser.add_argument(
275
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
276
+ )
277
+
278
+ args = parser.parse_args()
279
+ convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for Audio Spectrogram Transformer.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
24
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
25
+ from ...feature_extraction_utils import BatchFeature
26
+ from ...utils import TensorType, is_speech_available, is_torch_available, logging
27
+
28
+
29
+ if is_speech_available():
30
+ import torchaudio.compliance.kaldi as ta_kaldi
31
+
32
+ if is_torch_available():
33
+ import torch
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ class ASTFeatureExtractor(SequenceFeatureExtractor):
40
+ r"""
41
+ Constructs a Audio Spectrogram Transformer (AST) feature extractor.
42
+
43
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
44
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
45
+
46
+ This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
47
+ otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation.
48
+
49
+ Args:
50
+ feature_size (`int`, *optional*, defaults to 1):
51
+ The feature dimension of the extracted features.
52
+ sampling_rate (`int`, *optional*, defaults to 16000):
53
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
54
+ num_mel_bins (`int`, *optional*, defaults to 128):
55
+ Number of Mel-frequency bins.
56
+ max_length (`int`, *optional*, defaults to 1024):
57
+ Maximum length to which to pad/truncate the extracted features.
58
+ do_normalize (`bool`, *optional*, defaults to `True`):
59
+ Whether or not to normalize the log-Mel features using `mean` and `std`.
60
+ mean (`float`, *optional*, defaults to -4.2677393):
61
+ The mean value used to normalize the log-Mel features. Uses the AudioSet mean by default.
62
+ std (`float`, *optional*, defaults to 4.5689974):
63
+ The standard deviation value used to normalize the log-Mel features. Uses the AudioSet standard deviation
64
+ by default.
65
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
66
+ Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`.
67
+ """
68
+
69
+ model_input_names = ["input_values", "attention_mask"]
70
+
71
+ def __init__(
72
+ self,
73
+ feature_size=1,
74
+ sampling_rate=16000,
75
+ num_mel_bins=128,
76
+ max_length=1024,
77
+ padding_value=0.0,
78
+ do_normalize=True,
79
+ mean=-4.2677393,
80
+ std=4.5689974,
81
+ return_attention_mask=False,
82
+ **kwargs,
83
+ ):
84
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
85
+ self.num_mel_bins = num_mel_bins
86
+ self.max_length = max_length
87
+ self.do_normalize = do_normalize
88
+ self.mean = mean
89
+ self.std = std
90
+ self.return_attention_mask = return_attention_mask
91
+
92
+ if not is_speech_available():
93
+ mel_filters = mel_filter_bank(
94
+ num_frequency_bins=256,
95
+ num_mel_filters=self.num_mel_bins,
96
+ min_frequency=20,
97
+ max_frequency=sampling_rate // 2,
98
+ sampling_rate=sampling_rate,
99
+ norm=None,
100
+ mel_scale="kaldi",
101
+ triangularize_in_mel_space=True,
102
+ )
103
+
104
+ self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
105
+ self.window = window_function(400, "hann", periodic=False)
106
+
107
+ def _extract_fbank_features(
108
+ self,
109
+ waveform: np.ndarray,
110
+ max_length: int,
111
+ ) -> np.ndarray:
112
+ """
113
+ Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
114
+ and hence the waveform should not be normalized before feature extraction.
115
+ """
116
+ # waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
117
+ if is_speech_available():
118
+ waveform = torch.from_numpy(waveform).unsqueeze(0)
119
+ fbank = ta_kaldi.fbank(
120
+ waveform,
121
+ sample_frequency=self.sampling_rate,
122
+ window_type="hanning",
123
+ num_mel_bins=self.num_mel_bins,
124
+ )
125
+ else:
126
+ waveform = np.squeeze(waveform)
127
+ fbank = spectrogram(
128
+ waveform,
129
+ self.window,
130
+ frame_length=400,
131
+ hop_length=160,
132
+ fft_length=512,
133
+ power=2.0,
134
+ center=False,
135
+ preemphasis=0.97,
136
+ mel_filters=self.mel_filters,
137
+ log_mel="log",
138
+ mel_floor=1.192092955078125e-07,
139
+ remove_dc_offset=True,
140
+ ).T
141
+
142
+ fbank = torch.from_numpy(fbank)
143
+
144
+ n_frames = fbank.shape[0]
145
+ difference = max_length - n_frames
146
+
147
+ # pad or truncate, depending on difference
148
+ if difference > 0:
149
+ pad_module = torch.nn.ZeroPad2d((0, 0, 0, difference))
150
+ fbank = pad_module(fbank)
151
+ elif difference < 0:
152
+ fbank = fbank[0:max_length, :]
153
+
154
+ fbank = fbank.numpy()
155
+
156
+ return fbank
157
+
158
+ def normalize(self, input_values: np.ndarray) -> np.ndarray:
159
+ return (input_values - (self.mean)) / (self.std * 2)
160
+
161
+ def __call__(
162
+ self,
163
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
164
+ sampling_rate: Optional[int] = None,
165
+ return_tensors: Optional[Union[str, TensorType]] = None,
166
+ **kwargs,
167
+ ) -> BatchFeature:
168
+ """
169
+ Main method to featurize and prepare for the model one or several sequence(s).
170
+
171
+ Args:
172
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
173
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
174
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
175
+ stereo, i.e. single float per timestep.
176
+ sampling_rate (`int`, *optional*):
177
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
178
+ `sampling_rate` at the forward call to prevent silent errors.
179
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
180
+ If set, will return tensors instead of list of python integers. Acceptable values are:
181
+
182
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
183
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
184
+ - `'np'`: Return Numpy `np.ndarray` objects.
185
+ """
186
+
187
+ if sampling_rate is not None:
188
+ if sampling_rate != self.sampling_rate:
189
+ raise ValueError(
190
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
191
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
192
+ f" {self.sampling_rate} and not {sampling_rate}."
193
+ )
194
+ else:
195
+ logger.warning(
196
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
197
+ "Failing to do so can result in silent errors that might be hard to debug."
198
+ )
199
+
200
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
201
+ if is_batched_numpy and len(raw_speech.shape) > 2:
202
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
203
+ is_batched = is_batched_numpy or (
204
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
205
+ )
206
+
207
+ if is_batched:
208
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
209
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
210
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
211
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
212
+ raw_speech = raw_speech.astype(np.float32)
213
+
214
+ # always return batch
215
+ if not is_batched:
216
+ raw_speech = [raw_speech]
217
+
218
+ # extract fbank features and pad/truncate to max_length
219
+ features = [self._extract_fbank_features(waveform, max_length=self.max_length) for waveform in raw_speech]
220
+
221
+ # convert into BatchFeature
222
+ padded_inputs = BatchFeature({"input_values": features})
223
+
224
+ # make sure list is in array format
225
+ input_values = padded_inputs.get("input_values")
226
+ if isinstance(input_values[0], list):
227
+ padded_inputs["input_values"] = [np.asarray(feature, dtype=np.float32) for feature in input_values]
228
+
229
+ # normalization
230
+ if self.do_normalize:
231
+ padded_inputs["input_values"] = [self.normalize(feature) for feature in input_values]
232
+
233
+ if return_tensors is not None:
234
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
235
+
236
+ return padded_inputs
llmeval-env/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py ADDED
@@ -0,0 +1,613 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 MIT and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Audio Spectrogram Transformer (AST) model."""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Set, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
30
+ from .configuration_audio_spectrogram_transformer import ASTConfig
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ # General docstring
36
+ _CONFIG_FOR_DOC = "ASTConfig"
37
+
38
+ # Base docstring
39
+ _CHECKPOINT_FOR_DOC = "MIT/ast-finetuned-audioset-10-10-0.4593"
40
+ _EXPECTED_OUTPUT_SHAPE = [1, 1214, 768]
41
+
42
+ # Audio classification docstring
43
+ _SEQ_CLASS_CHECKPOINT = "MIT/ast-finetuned-audioset-10-10-0.4593"
44
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'Speech'"
45
+ _SEQ_CLASS_EXPECTED_LOSS = 0.17
46
+
47
+
48
+ from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
49
+
50
+
51
+ class ASTEmbeddings(nn.Module):
52
+ """
53
+ Construct the CLS token, position and patch embeddings.
54
+ """
55
+
56
+ def __init__(self, config: ASTConfig) -> None:
57
+ super().__init__()
58
+
59
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
60
+ self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
61
+ self.patch_embeddings = ASTPatchEmbeddings(config)
62
+
63
+ frequency_out_dimension, time_out_dimension = self.get_shape(config)
64
+ num_patches = frequency_out_dimension * time_out_dimension
65
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
66
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
67
+ self.config = config
68
+
69
+ def get_shape(self, config):
70
+ # see Karpathy's cs231n blog on how to calculate the output dimensions
71
+ # https://cs231n.github.io/convolutional-networks/#conv
72
+ frequency_out_dimension = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
73
+ time_out_dimension = (config.max_length - config.patch_size) // config.time_stride + 1
74
+
75
+ return frequency_out_dimension, time_out_dimension
76
+
77
+ def forward(self, input_values: torch.Tensor) -> torch.Tensor:
78
+ batch_size = input_values.shape[0]
79
+ embeddings = self.patch_embeddings(input_values)
80
+
81
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
82
+ distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)
83
+ embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
84
+ embeddings = embeddings + self.position_embeddings
85
+ embeddings = self.dropout(embeddings)
86
+
87
+ return embeddings
88
+
89
+
90
+ class ASTPatchEmbeddings(nn.Module):
91
+ """
92
+ This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size,
93
+ seq_length, hidden_size)` to be consumed by a Transformer.
94
+ """
95
+
96
+ def __init__(self, config):
97
+ super().__init__()
98
+
99
+ patch_size = config.patch_size
100
+ frequency_stride = config.frequency_stride
101
+ time_stride = config.time_stride
102
+
103
+ self.projection = nn.Conv2d(
104
+ 1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride)
105
+ )
106
+
107
+ def forward(self, input_values: torch.Tensor) -> torch.Tensor:
108
+ input_values = input_values.unsqueeze(1)
109
+ input_values = input_values.transpose(2, 3)
110
+ embeddings = self.projection(input_values).flatten(2).transpose(1, 2)
111
+ return embeddings
112
+
113
+
114
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->AST
115
+ class ASTSelfAttention(nn.Module):
116
+ def __init__(self, config: ASTConfig) -> None:
117
+ super().__init__()
118
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
119
+ raise ValueError(
120
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
121
+ f"heads {config.num_attention_heads}."
122
+ )
123
+
124
+ self.num_attention_heads = config.num_attention_heads
125
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
126
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
127
+
128
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
129
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
130
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
131
+
132
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
133
+
134
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
135
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
136
+ x = x.view(new_x_shape)
137
+ return x.permute(0, 2, 1, 3)
138
+
139
+ def forward(
140
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
141
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
142
+ mixed_query_layer = self.query(hidden_states)
143
+
144
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
145
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
146
+ query_layer = self.transpose_for_scores(mixed_query_layer)
147
+
148
+ # Take the dot product between "query" and "key" to get the raw attention scores.
149
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
150
+
151
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
152
+
153
+ # Normalize the attention scores to probabilities.
154
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
155
+
156
+ # This is actually dropping out entire tokens to attend to, which might
157
+ # seem a bit unusual, but is taken from the original Transformer paper.
158
+ attention_probs = self.dropout(attention_probs)
159
+
160
+ # Mask heads if we want to
161
+ if head_mask is not None:
162
+ attention_probs = attention_probs * head_mask
163
+
164
+ context_layer = torch.matmul(attention_probs, value_layer)
165
+
166
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
167
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
168
+ context_layer = context_layer.view(new_context_layer_shape)
169
+
170
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
171
+
172
+ return outputs
173
+
174
+
175
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->AST
176
+ class ASTSelfOutput(nn.Module):
177
+ """
178
+ The residual connection is defined in ASTLayer instead of here (as is the case with other models), due to the
179
+ layernorm applied before each block.
180
+ """
181
+
182
+ def __init__(self, config: ASTConfig) -> None:
183
+ super().__init__()
184
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
185
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
186
+
187
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
188
+ hidden_states = self.dense(hidden_states)
189
+ hidden_states = self.dropout(hidden_states)
190
+
191
+ return hidden_states
192
+
193
+
194
+ # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->AST
195
+ class ASTAttention(nn.Module):
196
+ def __init__(self, config: ASTConfig) -> None:
197
+ super().__init__()
198
+ self.attention = ASTSelfAttention(config)
199
+ self.output = ASTSelfOutput(config)
200
+ self.pruned_heads = set()
201
+
202
+ def prune_heads(self, heads: Set[int]) -> None:
203
+ if len(heads) == 0:
204
+ return
205
+ heads, index = find_pruneable_heads_and_indices(
206
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
207
+ )
208
+
209
+ # Prune linear layers
210
+ self.attention.query = prune_linear_layer(self.attention.query, index)
211
+ self.attention.key = prune_linear_layer(self.attention.key, index)
212
+ self.attention.value = prune_linear_layer(self.attention.value, index)
213
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
214
+
215
+ # Update hyper params and store pruned heads
216
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
217
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
218
+ self.pruned_heads = self.pruned_heads.union(heads)
219
+
220
+ def forward(
221
+ self,
222
+ hidden_states: torch.Tensor,
223
+ head_mask: Optional[torch.Tensor] = None,
224
+ output_attentions: bool = False,
225
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
226
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
227
+
228
+ attention_output = self.output(self_outputs[0], hidden_states)
229
+
230
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
231
+ return outputs
232
+
233
+
234
+ # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->AST
235
+ class ASTIntermediate(nn.Module):
236
+ def __init__(self, config: ASTConfig) -> None:
237
+ super().__init__()
238
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
239
+ if isinstance(config.hidden_act, str):
240
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
241
+ else:
242
+ self.intermediate_act_fn = config.hidden_act
243
+
244
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
245
+ hidden_states = self.dense(hidden_states)
246
+ hidden_states = self.intermediate_act_fn(hidden_states)
247
+
248
+ return hidden_states
249
+
250
+
251
+ # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->AST
252
+ class ASTOutput(nn.Module):
253
+ def __init__(self, config: ASTConfig) -> None:
254
+ super().__init__()
255
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
256
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
257
+
258
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
259
+ hidden_states = self.dense(hidden_states)
260
+ hidden_states = self.dropout(hidden_states)
261
+
262
+ hidden_states = hidden_states + input_tensor
263
+
264
+ return hidden_states
265
+
266
+
267
+ # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->AST
268
+ class ASTLayer(nn.Module):
269
+ """This corresponds to the Block class in the timm implementation."""
270
+
271
+ def __init__(self, config: ASTConfig) -> None:
272
+ super().__init__()
273
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
274
+ self.seq_len_dim = 1
275
+ self.attention = ASTAttention(config)
276
+ self.intermediate = ASTIntermediate(config)
277
+ self.output = ASTOutput(config)
278
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
279
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
280
+
281
+ def forward(
282
+ self,
283
+ hidden_states: torch.Tensor,
284
+ head_mask: Optional[torch.Tensor] = None,
285
+ output_attentions: bool = False,
286
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
287
+ self_attention_outputs = self.attention(
288
+ self.layernorm_before(hidden_states), # in AST, layernorm is applied before self-attention
289
+ head_mask,
290
+ output_attentions=output_attentions,
291
+ )
292
+ attention_output = self_attention_outputs[0]
293
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
294
+
295
+ # first residual connection
296
+ hidden_states = attention_output + hidden_states
297
+
298
+ # in AST, layernorm is also applied after self-attention
299
+ layer_output = self.layernorm_after(hidden_states)
300
+ layer_output = self.intermediate(layer_output)
301
+
302
+ # second residual connection is done here
303
+ layer_output = self.output(layer_output, hidden_states)
304
+
305
+ outputs = (layer_output,) + outputs
306
+
307
+ return outputs
308
+
309
+
310
+ # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->AST
311
+ class ASTEncoder(nn.Module):
312
+ def __init__(self, config: ASTConfig) -> None:
313
+ super().__init__()
314
+ self.config = config
315
+ self.layer = nn.ModuleList([ASTLayer(config) for _ in range(config.num_hidden_layers)])
316
+ self.gradient_checkpointing = False
317
+
318
+ def forward(
319
+ self,
320
+ hidden_states: torch.Tensor,
321
+ head_mask: Optional[torch.Tensor] = None,
322
+ output_attentions: bool = False,
323
+ output_hidden_states: bool = False,
324
+ return_dict: bool = True,
325
+ ) -> Union[tuple, BaseModelOutput]:
326
+ all_hidden_states = () if output_hidden_states else None
327
+ all_self_attentions = () if output_attentions else None
328
+
329
+ for i, layer_module in enumerate(self.layer):
330
+ if output_hidden_states:
331
+ all_hidden_states = all_hidden_states + (hidden_states,)
332
+
333
+ layer_head_mask = head_mask[i] if head_mask is not None else None
334
+
335
+ if self.gradient_checkpointing and self.training:
336
+ layer_outputs = self._gradient_checkpointing_func(
337
+ layer_module.__call__,
338
+ hidden_states,
339
+ layer_head_mask,
340
+ output_attentions,
341
+ )
342
+ else:
343
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
344
+
345
+ hidden_states = layer_outputs[0]
346
+
347
+ if output_attentions:
348
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
349
+
350
+ if output_hidden_states:
351
+ all_hidden_states = all_hidden_states + (hidden_states,)
352
+
353
+ if not return_dict:
354
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
355
+ return BaseModelOutput(
356
+ last_hidden_state=hidden_states,
357
+ hidden_states=all_hidden_states,
358
+ attentions=all_self_attentions,
359
+ )
360
+
361
+
362
+ class ASTPreTrainedModel(PreTrainedModel):
363
+ """
364
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
365
+ models.
366
+ """
367
+
368
+ config_class = ASTConfig
369
+ base_model_prefix = "audio_spectrogram_transformer"
370
+ main_input_name = "input_values"
371
+ supports_gradient_checkpointing = True
372
+
373
+ # Copied from transformers.models.deit.modeling_deit.DeiTPreTrainedModel._init_weights
374
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
375
+ """Initialize the weights"""
376
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
377
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
378
+ # `trunc_normal_cpu` not implemented in `half` issues
379
+ module.weight.data = nn.init.trunc_normal_(
380
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
381
+ ).to(module.weight.dtype)
382
+ if module.bias is not None:
383
+ module.bias.data.zero_()
384
+ elif isinstance(module, nn.LayerNorm):
385
+ module.bias.data.zero_()
386
+ module.weight.data.fill_(1.0)
387
+
388
+
389
+ AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING = r"""
390
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
391
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
392
+ behavior.
393
+
394
+ Parameters:
395
+ config ([`ASTConfig`]):
396
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
397
+ load the weights associated with the model, only the configuration. Check out the
398
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
399
+ """
400
+
401
+ AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING = r"""
402
+ Args:
403
+ input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
404
+ Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
405
+ loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
406
+ the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
407
+ [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
408
+ tensor of type `torch.FloatTensor`. See [`~ASTFeatureExtractor.__call__`]
409
+
410
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
411
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
412
+
413
+ - 1 indicates the head is **not masked**,
414
+ - 0 indicates the head is **masked**.
415
+
416
+ output_attentions (`bool`, *optional*):
417
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
418
+ tensors for more detail.
419
+ output_hidden_states (`bool`, *optional*):
420
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
421
+ more detail.
422
+ return_dict (`bool`, *optional*):
423
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
424
+ """
425
+
426
+
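As the docstring above notes, `input_values` are mel features produced by the feature extractor rather than raw audio. A hedged sketch of that preparation step, assuming a 16 kHz mono waveform and the `MIT/ast-finetuned-audioset-10-10-0.4593` checkpoint name (both are illustrative assumptions, not taken from this file):

```python
import numpy as np
from transformers import ASTFeatureExtractor

# Illustrative: one second of 16 kHz audio as a float numpy array.
waveform = np.zeros(16000, dtype=np.float32)

feature_extractor = ASTFeatureExtractor.from_pretrained(
    "MIT/ast-finetuned-audioset-10-10-0.4593"  # assumed checkpoint name
)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

# Shape (batch_size, max_length, num_mel_bins), ready to pass as `input_values`.
print(inputs["input_values"].shape)
```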
427
+ @add_start_docstrings(
428
+ "The bare AST Model transformer outputting raw hidden-states without any specific head on top.",
429
+ AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING,
430
+ )
431
+ class ASTModel(ASTPreTrainedModel):
432
+ def __init__(self, config: ASTConfig) -> None:
433
+ super().__init__(config)
434
+ self.config = config
435
+
436
+ self.embeddings = ASTEmbeddings(config)
437
+ self.encoder = ASTEncoder(config)
438
+
439
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
440
+
441
+ # Initialize weights and apply final processing
442
+ self.post_init()
443
+
444
+ def get_input_embeddings(self) -> ASTPatchEmbeddings:
445
+ return self.embeddings.patch_embeddings
446
+
447
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
448
+ """
449
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
450
+ class PreTrainedModel
451
+ """
452
+ for layer, heads in heads_to_prune.items():
453
+ self.encoder.layer[layer].attention.prune_heads(heads)
454
+
455
+ @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING)
456
+ @add_code_sample_docstrings(
457
+ checkpoint=_CHECKPOINT_FOR_DOC,
458
+ output_type=BaseModelOutputWithPooling,
459
+ config_class=_CONFIG_FOR_DOC,
460
+ modality="audio",
461
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
462
+ )
463
+ def forward(
464
+ self,
465
+ input_values: Optional[torch.Tensor] = None,
466
+ head_mask: Optional[torch.Tensor] = None,
467
+ output_attentions: Optional[bool] = None,
468
+ output_hidden_states: Optional[bool] = None,
469
+ return_dict: Optional[bool] = None,
470
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
471
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
472
+ output_hidden_states = (
473
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
474
+ )
475
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
476
+
477
+ if input_values is None:
478
+ raise ValueError("You have to specify input_values")
479
+
480
+ # Prepare head mask if needed
481
+ # 1.0 in head_mask indicates we keep the head
482
+ # attention_probs has shape bsz x n_heads x N x N
483
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
484
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
485
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
486
+
487
+ embedding_output = self.embeddings(input_values)
488
+
489
+ encoder_outputs = self.encoder(
490
+ embedding_output,
491
+ head_mask=head_mask,
492
+ output_attentions=output_attentions,
493
+ output_hidden_states=output_hidden_states,
494
+ return_dict=return_dict,
495
+ )
496
+ sequence_output = encoder_outputs[0]
497
+ sequence_output = self.layernorm(sequence_output)
498
+
499
+ pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2
500
+
501
+ if not return_dict:
502
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
503
+
504
+ return BaseModelOutputWithPooling(
505
+ last_hidden_state=sequence_output,
506
+ pooler_output=pooled_output,
507
+ hidden_states=encoder_outputs.hidden_states,
508
+ attentions=encoder_outputs.attentions,
509
+ )
510
+
511
+
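The `pooler_output` computed in `ASTModel.forward` is simply the mean of the first two token embeddings (the class and distillation tokens prepended by the embeddings module, which is not shown in this excerpt). A small sketch of that pooling step in isolation, using a random tensor in place of the encoder output:

```python
import torch

batch_size, seq_len, hidden_size = 2, 1214, 768  # illustrative sizes
sequence_output = torch.randn(batch_size, seq_len, hidden_size)

# Average the two prepended special-token embeddings to get a fixed-size representation.
pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2
print(pooled_output.shape)  # torch.Size([2, 768])
```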
512
+ class ASTMLPHead(nn.Module):
513
+ def __init__(self, config: ASTConfig):
514
+ super().__init__()
515
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
516
+ self.dense = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
517
+
518
+ def forward(self, hidden_state):
519
+ hidden_state = self.layernorm(hidden_state)
520
+ hidden_state = self.dense(hidden_state)
521
+ return hidden_state
522
+
523
+
524
+ @add_start_docstrings(
525
+ """
526
+ Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled
527
+ output) e.g. for datasets like AudioSet, Speech Commands v2.
528
+ """,
529
+ AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING,
530
+ )
531
+ class ASTForAudioClassification(ASTPreTrainedModel):
532
+ def __init__(self, config: ASTConfig) -> None:
533
+ super().__init__(config)
534
+
535
+ self.num_labels = config.num_labels
536
+ self.audio_spectrogram_transformer = ASTModel(config)
537
+
538
+ # Classifier head
539
+ self.classifier = ASTMLPHead(config)
540
+
541
+ # Initialize weights and apply final processing
542
+ self.post_init()
543
+
544
+ @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING)
545
+ @add_code_sample_docstrings(
546
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
547
+ output_type=SequenceClassifierOutput,
548
+ config_class=_CONFIG_FOR_DOC,
549
+ modality="audio",
550
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
551
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
552
+ )
553
+ def forward(
554
+ self,
555
+ input_values: Optional[torch.Tensor] = None,
556
+ head_mask: Optional[torch.Tensor] = None,
557
+ labels: Optional[torch.Tensor] = None,
558
+ output_attentions: Optional[bool] = None,
559
+ output_hidden_states: Optional[bool] = None,
560
+ return_dict: Optional[bool] = None,
561
+ ) -> Union[tuple, SequenceClassifierOutput]:
562
+ r"""
563
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
564
+ Labels for computing the audio classification/regression loss. Indices should be in `[0, ...,
565
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
566
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
567
+ """
568
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
569
+
570
+ outputs = self.audio_spectrogram_transformer(
571
+ input_values,
572
+ head_mask=head_mask,
573
+ output_attentions=output_attentions,
574
+ output_hidden_states=output_hidden_states,
575
+ return_dict=return_dict,
576
+ )
577
+
578
+ pooled_output = outputs[1]
579
+ logits = self.classifier(pooled_output)
580
+
581
+ loss = None
582
+ if labels is not None:
583
+ if self.config.problem_type is None:
584
+ if self.num_labels == 1:
585
+ self.config.problem_type = "regression"
586
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
587
+ self.config.problem_type = "single_label_classification"
588
+ else:
589
+ self.config.problem_type = "multi_label_classification"
590
+
591
+ if self.config.problem_type == "regression":
592
+ loss_fct = MSELoss()
593
+ if self.num_labels == 1:
594
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
595
+ else:
596
+ loss = loss_fct(logits, labels)
597
+ elif self.config.problem_type == "single_label_classification":
598
+ loss_fct = CrossEntropyLoss()
599
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
600
+ elif self.config.problem_type == "multi_label_classification":
601
+ loss_fct = BCEWithLogitsLoss()
602
+ loss = loss_fct(logits, labels)
603
+
604
+ if not return_dict:
605
+ output = (logits,) + outputs[2:]
606
+ return ((loss,) + output) if loss is not None else output
607
+
608
+ return SequenceClassifierOutput(
609
+ loss=loss,
610
+ logits=logits,
611
+ hidden_states=outputs.hidden_states,
612
+ attentions=outputs.attentions,
613
+ )
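The loss branch in `ASTForAudioClassification.forward` infers `problem_type` from `num_labels` and the label dtype. A compact sketch of that dispatch logic on dummy tensors (standalone PyTorch; the shapes and label values are illustrative):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

num_labels = 3
logits = torch.randn(4, num_labels)

# Integer labels with num_labels > 1 -> single-label classification (cross-entropy).
labels = torch.tensor([0, 2, 1, 2])
if num_labels == 1:
    loss = MSELoss()(logits.squeeze(), labels.squeeze().float())
elif labels.dtype in (torch.long, torch.int):
    loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
else:
    # Float multi-hot labels -> multi-label classification.
    loss = BCEWithLogitsLoss()(logits, labels)
print(loss.item())
```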
llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (927 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc ADDED
Binary file (7.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez.py ADDED
@@ -0,0 +1,287 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for the BARThez model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ SPIECE_UNDERLINE = "▁"
34
+
35
+ # TODO this class is useless. This is the most standard sentencepiece model. Let's find which one is closest and nuke this.
36
+
37
+
38
+ class BarthezTokenizer(PreTrainedTokenizer):
39
+ """
40
+ Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a BARThez tokenizer. Based on
41
+ [SentencePiece](https://github.com/google/sentencepiece).
42
+
43
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
44
+ this superclass for more information regarding those methods.
45
+
46
+ Args:
47
+ vocab_file (`str`):
48
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
49
+ contains the vocabulary necessary to instantiate a tokenizer.
50
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
51
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
52
+
53
+ <Tip>
54
+
55
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
56
+ sequence. The token used is the `cls_token`.
57
+
58
+ </Tip>
59
+
60
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
61
+ The end of sequence token.
62
+
63
+ <Tip>
64
+
65
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
66
+ The token used is the `sep_token`.
67
+
68
+ </Tip>
69
+
70
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
71
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
72
+ sequence classification or for a text and a question for question answering. It is also used as the last
73
+ token of a sequence built with special tokens.
74
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
75
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
76
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
77
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
78
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
79
+ token instead.
80
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
81
+ The token used for padding, for example when batching sequences of different lengths.
82
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ sp_model_kwargs (`dict`, *optional*):
86
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
87
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
88
+ to set:
89
+
90
+ - `enable_sampling`: Enable subword regularization.
91
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
92
+
93
+ - `nbest_size = {0,1}`: No sampling is performed.
94
+ - `nbest_size > 1`: samples from the nbest_size results.
95
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
96
+ using forward-filtering-and-backward-sampling algorithm.
97
+
98
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
99
+ BPE-dropout.
100
+
101
+ Attributes:
102
+ sp_model (`SentencePieceProcessor`):
103
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
104
+ """
105
+
106
+ vocab_files_names = VOCAB_FILES_NAMES
107
+ model_input_names = ["input_ids", "attention_mask"]
108
+
109
+ def __init__(
110
+ self,
111
+ vocab_file,
112
+ bos_token="<s>",
113
+ eos_token="</s>",
114
+ sep_token="</s>",
115
+ cls_token="<s>",
116
+ unk_token="<unk>",
117
+ pad_token="<pad>",
118
+ mask_token="<mask>",
119
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
120
+ **kwargs,
121
+ ) -> None:
122
+ # Mask token behaves like a normal word, i.e. includes the space before it. Will have normalized=False by default this way
123
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
124
+
125
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
126
+
127
+ self.vocab_file = vocab_file
128
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
129
+ self.sp_model.Load(str(vocab_file))
130
+ super().__init__(
131
+ bos_token=bos_token,
132
+ eos_token=eos_token,
133
+ unk_token=unk_token,
134
+ sep_token=sep_token,
135
+ cls_token=cls_token,
136
+ pad_token=pad_token,
137
+ mask_token=mask_token,
138
+ sp_model_kwargs=self.sp_model_kwargs,
139
+ **kwargs,
140
+ )
141
+
142
+ def build_inputs_with_special_tokens(
143
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
144
+ ) -> List[int]:
145
+ """
146
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
147
+ adding special tokens. A BARThez sequence has the following format:
148
+
149
+ - single sequence: `<s> X </s>`
150
+ - pair of sequences: `<s> A </s></s> B </s>`
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs to which the special tokens will be added.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
160
+ """
161
+
162
+ if token_ids_1 is None:
163
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
164
+ cls = [self.cls_token_id]
165
+ sep = [self.sep_token_id]
166
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
167
+
168
+ def get_special_tokens_mask(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
170
+ ) -> List[int]:
171
+ """
172
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
173
+ special tokens using the tokenizer `prepare_for_model` method.
174
+
175
+ Args:
176
+ token_ids_0 (`List[int]`):
177
+ List of IDs.
178
+ token_ids_1 (`List[int]`, *optional*):
179
+ Optional second list of IDs for sequence pairs.
180
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
181
+ Whether or not the token list is already formatted with special tokens for the model.
182
+
183
+ Returns:
184
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
185
+ """
186
+ if already_has_special_tokens:
187
+ return super().get_special_tokens_mask(
188
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
189
+ )
190
+
191
+ if token_ids_1 is None:
192
+ return [1] + ([0] * len(token_ids_0)) + [1]
193
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
194
+
195
+ def create_token_type_ids_from_sequences(
196
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
197
+ ) -> List[int]:
198
+ """
199
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task.
200
+
201
+ Args:
202
+ token_ids_0 (`List[int]`):
203
+ List of IDs.
204
+ token_ids_1 (`List[int]`, *optional*):
205
+ Optional second list of IDs for sequence pairs.
206
+
207
+ Returns:
208
+ `List[int]`: List of zeros.
209
+ """
210
+ sep = [self.sep_token_id]
211
+ cls = [self.cls_token_id]
212
+
213
+ if token_ids_1 is None:
214
+ return len(cls + token_ids_0 + sep) * [0]
215
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
216
+
217
+ @property
218
+ def vocab_size(self):
219
+ return len(self.sp_model)
220
+
221
+ def get_vocab(self):
222
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
223
+ vocab.update(self.added_tokens_encoder)
224
+ return vocab
225
+
226
+ def _tokenize(self, text: str) -> List[str]:
227
+ return self.sp_model.encode(text, out_type=str)
228
+
229
+ def _convert_token_to_id(self, token):
230
+ """Converts a token (str) in an id using the vocab."""
231
+ return self.sp_model.PieceToId(token)
232
+
233
+ def _convert_id_to_token(self, index):
234
+ """Converts an index (integer) in a token (str) using the vocab."""
235
+ return self.sp_model.IdToPiece(index)
236
+
237
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
238
+ def convert_tokens_to_string(self, tokens):
239
+ """Converts a sequence of tokens (string) in a single string."""
240
+ current_sub_tokens = []
241
+ out_string = ""
242
+ prev_is_special = False
243
+ for token in tokens:
244
+ # make sure that special tokens are not decoded using the sentencepiece model
245
+ if token in self.all_special_tokens:
246
+ if not prev_is_special:
247
+ out_string += " "
248
+ out_string += self.sp_model.decode(current_sub_tokens) + token
249
+ prev_is_special = True
250
+ current_sub_tokens = []
251
+ else:
252
+ current_sub_tokens.append(token)
253
+ prev_is_special = False
254
+ out_string += self.sp_model.decode(current_sub_tokens)
255
+ return out_string.strip()
256
+
257
+ def __getstate__(self):
258
+ state = self.__dict__.copy()
259
+ state["sp_model"] = None
260
+ return state
261
+
262
+ def __setstate__(self, d):
263
+ self.__dict__ = d
264
+
265
+ # for backward compatibility
266
+ if not hasattr(self, "sp_model_kwargs"):
267
+ self.sp_model_kwargs = {}
268
+
269
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
270
+ self.sp_model.Load(self.vocab_file)
271
+
272
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
273
+ if not os.path.isdir(save_directory):
274
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
275
+ return
276
+ out_vocab_file = os.path.join(
277
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
278
+ )
279
+
280
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
281
+ copyfile(self.vocab_file, out_vocab_file)
282
+ elif not os.path.isfile(self.vocab_file):
283
+ with open(out_vocab_file, "wb") as fi:
284
+ content_spiece_model = self.sp_model.serialized_model_proto()
285
+ fi.write(content_spiece_model)
286
+
287
+ return (out_vocab_file,)
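`build_inputs_with_special_tokens` above is a pure list operation, so its behaviour can be checked without a real SentencePiece model. A sketch using stand-in token IDs (the IDs 0 and 2 for `<s>`/`</s>` are illustrative, not read from an actual vocabulary):

```python
from typing import List, Optional

CLS_ID, SEP_ID = 0, 2  # stand-ins for <s> and </s>

def build_inputs(token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    # Mirrors BarthezTokenizer.build_inputs_with_special_tokens:
    # <s> A </s> for a single sequence, <s> A </s></s> B </s> for a pair.
    if token_ids_1 is None:
        return [CLS_ID] + token_ids_0 + [SEP_ID]
    return [CLS_ID] + token_ids_0 + [SEP_ID, SEP_ID] + token_ids_1 + [SEP_ID]

print(build_inputs([11, 12]))        # [0, 11, 12, 2]
print(build_inputs([11, 12], [13]))  # [0, 11, 12, 2, 2, 13, 2]
```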
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__init__.py ADDED
@@ -0,0 +1,157 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_keras_nlp_available,
22
+ is_tensorflow_text_available,
23
+ is_tf_available,
24
+ is_tokenizers_available,
25
+ is_torch_available,
26
+ )
27
+
28
+
29
+ _import_structure = {
30
+ "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"],
31
+ "tokenization_gpt2": ["GPT2Tokenizer"],
32
+ }
33
+
34
+ try:
35
+ if not is_tokenizers_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"]
41
+
42
+ try:
43
+ if not is_torch_available():
44
+ raise OptionalDependencyNotAvailable()
45
+ except OptionalDependencyNotAvailable:
46
+ pass
47
+ else:
48
+ _import_structure["modeling_gpt2"] = [
49
+ "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
50
+ "GPT2DoubleHeadsModel",
51
+ "GPT2ForQuestionAnswering",
52
+ "GPT2ForSequenceClassification",
53
+ "GPT2ForTokenClassification",
54
+ "GPT2LMHeadModel",
55
+ "GPT2Model",
56
+ "GPT2PreTrainedModel",
57
+ "load_tf_weights_in_gpt2",
58
+ ]
59
+
60
+ try:
61
+ if not is_tf_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_tf_gpt2"] = [
67
+ "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
68
+ "TFGPT2DoubleHeadsModel",
69
+ "TFGPT2ForSequenceClassification",
70
+ "TFGPT2LMHeadModel",
71
+ "TFGPT2MainLayer",
72
+ "TFGPT2Model",
73
+ "TFGPT2PreTrainedModel",
74
+ ]
75
+
76
+ try:
77
+ if not is_keras_nlp_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ _import_structure["tokenization_gpt2_tf"] = ["TFGPT2Tokenizer"]
83
+
84
+ try:
85
+ if not is_flax_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"]
91
+
92
+ if TYPE_CHECKING:
93
+ from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig
94
+ from .tokenization_gpt2 import GPT2Tokenizer
95
+
96
+ try:
97
+ if not is_tokenizers_available():
98
+ raise OptionalDependencyNotAvailable()
99
+ except OptionalDependencyNotAvailable:
100
+ pass
101
+ else:
102
+ from .tokenization_gpt2_fast import GPT2TokenizerFast
103
+
104
+ try:
105
+ if not is_torch_available():
106
+ raise OptionalDependencyNotAvailable()
107
+ except OptionalDependencyNotAvailable:
108
+ pass
109
+ else:
110
+ from .modeling_gpt2 import (
111
+ GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
112
+ GPT2DoubleHeadsModel,
113
+ GPT2ForQuestionAnswering,
114
+ GPT2ForSequenceClassification,
115
+ GPT2ForTokenClassification,
116
+ GPT2LMHeadModel,
117
+ GPT2Model,
118
+ GPT2PreTrainedModel,
119
+ load_tf_weights_in_gpt2,
120
+ )
121
+
122
+ try:
123
+ if not is_tf_available():
124
+ raise OptionalDependencyNotAvailable()
125
+ except OptionalDependencyNotAvailable:
126
+ pass
127
+ else:
128
+ from .modeling_tf_gpt2 import (
129
+ TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
130
+ TFGPT2DoubleHeadsModel,
131
+ TFGPT2ForSequenceClassification,
132
+ TFGPT2LMHeadModel,
133
+ TFGPT2MainLayer,
134
+ TFGPT2Model,
135
+ TFGPT2PreTrainedModel,
136
+ )
137
+
138
+ try:
139
+ if not is_keras_nlp_available():
140
+ raise OptionalDependencyNotAvailable()
141
+ except OptionalDependencyNotAvailable:
142
+ pass
143
+ else:
144
+ from .tokenization_gpt2_tf import TFGPT2Tokenizer
145
+
146
+ try:
147
+ if not is_flax_available():
148
+ raise OptionalDependencyNotAvailable()
149
+ except OptionalDependencyNotAvailable:
150
+ pass
151
+ else:
152
+ from .modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel
153
+
154
+ else:
155
+ import sys
156
+
157
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
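At import time the module above only registers `_import_structure`; the heavy submodules are loaded on first attribute access through `_LazyModule`. A hedged sketch of what that looks like from user code (assuming `transformers` is installed with PyTorch available):

```python
import transformers.models.gpt2 as gpt2_module

# The config and tokenizer entries are always present in _import_structure.
config = gpt2_module.GPT2Config()
print(config.model_type)  # "gpt2"

# Torch-backed symbols such as GPT2Model are only resolved (and their module
# imported) when first accessed, and are missing if torch is not installed.
model_cls = gpt2_module.GPT2Model
print(model_cls.__name__)
```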
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.32 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/configuration_gpt2.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/convert_gpt2_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.74 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_flax_gpt2.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_gpt2.cpython-310.pyc ADDED
Binary file (55.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_tf_gpt2.cpython-310.pyc ADDED
Binary file (39.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_fast.cpython-310.pyc ADDED
Binary file (5.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_tf.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/configuration_gpt2.py ADDED
@@ -0,0 +1,272 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ OpenAI GPT-2 configuration"""
17
+ from collections import OrderedDict
18
+ from typing import Any, List, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class GPT2Config(PretrainedConfig):
33
+ """
34
+ This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
35
+ instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the GPT-2
37
+ [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 50257):
45
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
46
+ `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
47
+ n_positions (`int`, *optional*, defaults to 1024):
48
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
49
+ just in case (e.g., 512 or 1024 or 2048).
50
+ n_embd (`int`, *optional*, defaults to 768):
51
+ Dimensionality of the embeddings and hidden states.
52
+ n_layer (`int`, *optional*, defaults to 12):
53
+ Number of hidden layers in the Transformer encoder.
54
+ n_head (`int`, *optional*, defaults to 12):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ n_inner (`int`, *optional*):
57
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
58
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
59
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
60
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ embd_pdrop (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the embeddings.
64
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the attention.
66
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
67
+ The epsilon to use in the layer normalization layers.
68
+ initializer_range (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ summary_type (`string`, *optional*, defaults to `"cls_index"`):
71
+ Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
72
+ [`TFGPT2DoubleHeadsModel`].
73
+
74
+ Has to be one of the following options:
75
+
76
+ - `"last"`: Take the last token hidden state (like XLNet).
77
+ - `"first"`: Take the first token hidden state (like BERT).
78
+ - `"mean"`: Take the mean of all tokens hidden states.
79
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
80
+ - `"attn"`: Not implemented now, use multi-head attention.
81
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
82
+ Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
83
+ [`TFGPT2DoubleHeadsModel`].
84
+
85
+ Whether or not to add a projection after the vector extraction.
86
+ summary_activation (`str`, *optional*):
87
+ Argument used when doing sequence summary. Used in for the multiple choice head in
88
+ [`GPT2DoubleHeadsModel`].
89
+
90
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
91
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
92
+ Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
93
+ [`TFGPT2DoubleHeadsModel`].
94
+
95
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
96
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
97
+ Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
98
+ [`TFGPT2DoubleHeadsModel`].
99
+
100
+ The dropout ratio to be used after the projection and activation.
101
+ scale_attn_weights (`bool`, *optional*, defaults to `True`):
102
+ Scale attention weights by dividing by sqrt(hidden_size).
103
+ use_cache (`bool`, *optional*, defaults to `True`):
104
+ Whether or not the model should return the last key/values attentions (not used by all models).
105
+ bos_token_id (`int`, *optional*, defaults to 50256):
106
+ Id of the beginning of sentence token in the vocabulary.
107
+ eos_token_id (`int`, *optional*, defaults to 50256):
108
+ Id of the end of sentence token in the vocabulary.
109
+ scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
110
+ Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
111
+ reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
112
+ Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
113
+ dot-product/softmax to float32 when training with mixed precision.
114
+
115
+ Example:
116
+
117
+ ```python
118
+ >>> from transformers import GPT2Config, GPT2Model
119
+
120
+ >>> # Initializing a GPT2 configuration
121
+ >>> configuration = GPT2Config()
122
+
123
+ >>> # Initializing a model (with random weights) from the configuration
124
+ >>> model = GPT2Model(configuration)
125
+
126
+ >>> # Accessing the model configuration
127
+ >>> configuration = model.config
128
+ ```"""
129
+
130
+ model_type = "gpt2"
131
+ keys_to_ignore_at_inference = ["past_key_values"]
132
+ attribute_map = {
133
+ "hidden_size": "n_embd",
134
+ "max_position_embeddings": "n_positions",
135
+ "num_attention_heads": "n_head",
136
+ "num_hidden_layers": "n_layer",
137
+ }
138
+
139
+ def __init__(
140
+ self,
141
+ vocab_size=50257,
142
+ n_positions=1024,
143
+ n_embd=768,
144
+ n_layer=12,
145
+ n_head=12,
146
+ n_inner=None,
147
+ activation_function="gelu_new",
148
+ resid_pdrop=0.1,
149
+ embd_pdrop=0.1,
150
+ attn_pdrop=0.1,
151
+ layer_norm_epsilon=1e-5,
152
+ initializer_range=0.02,
153
+ summary_type="cls_index",
154
+ summary_use_proj=True,
155
+ summary_activation=None,
156
+ summary_proj_to_labels=True,
157
+ summary_first_dropout=0.1,
158
+ scale_attn_weights=True,
159
+ use_cache=True,
160
+ bos_token_id=50256,
161
+ eos_token_id=50256,
162
+ scale_attn_by_inverse_layer_idx=False,
163
+ reorder_and_upcast_attn=False,
164
+ **kwargs,
165
+ ):
166
+ self.vocab_size = vocab_size
167
+ self.n_positions = n_positions
168
+ self.n_embd = n_embd
169
+ self.n_layer = n_layer
170
+ self.n_head = n_head
171
+ self.n_inner = n_inner
172
+ self.activation_function = activation_function
173
+ self.resid_pdrop = resid_pdrop
174
+ self.embd_pdrop = embd_pdrop
175
+ self.attn_pdrop = attn_pdrop
176
+ self.layer_norm_epsilon = layer_norm_epsilon
177
+ self.initializer_range = initializer_range
178
+ self.summary_type = summary_type
179
+ self.summary_use_proj = summary_use_proj
180
+ self.summary_activation = summary_activation
181
+ self.summary_first_dropout = summary_first_dropout
182
+ self.summary_proj_to_labels = summary_proj_to_labels
183
+ self.scale_attn_weights = scale_attn_weights
184
+ self.use_cache = use_cache
185
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
186
+ self.reorder_and_upcast_attn = reorder_and_upcast_attn
187
+
188
+ self.bos_token_id = bos_token_id
189
+ self.eos_token_id = eos_token_id
190
+
191
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
192
+
193
+
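The `attribute_map` defined above lets GPT-2 configs answer the generic attribute names used across the library. A short sketch (the printed values are the defaults from `__init__`):

```python
from transformers import GPT2Config

config = GPT2Config()

# Generic names are remapped onto the GPT-2 specific fields via attribute_map.
print(config.hidden_size, config.n_embd)                   # 768 768
print(config.num_hidden_layers, config.n_layer)            # 12 12
print(config.max_position_embeddings, config.n_positions)  # 1024 1024
```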
194
+ class GPT2OnnxConfig(OnnxConfigWithPast):
195
+ def __init__(
196
+ self,
197
+ config: PretrainedConfig,
198
+ task: str = "default",
199
+ patching_specs: List[PatchingSpec] = None,
200
+ use_past: bool = False,
201
+ ):
202
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
203
+ if not getattr(self._config, "pad_token_id", None):
204
+ # TODO: how to do that better?
205
+ self._config.pad_token_id = 0
206
+
207
+ @property
208
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
209
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
210
+ if self.use_past:
211
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
212
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
213
+ else:
214
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
215
+
216
+ return common_inputs
217
+
218
+ @property
219
+ def num_layers(self) -> int:
220
+ return self._config.n_layer
221
+
222
+ @property
223
+ def num_attention_heads(self) -> int:
224
+ return self._config.n_head
225
+
226
+ def generate_dummy_inputs(
227
+ self,
228
+ tokenizer: PreTrainedTokenizer,
229
+ batch_size: int = -1,
230
+ seq_length: int = -1,
231
+ is_pair: bool = False,
232
+ framework: Optional[TensorType] = None,
233
+ ) -> Mapping[str, Any]:
234
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
235
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
236
+ )
237
+
238
+ # We need to order the inputs in the way they appear in forward()
239
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
240
+
241
+ # Need to add the past_keys
242
+ if self.use_past:
243
+ if not is_torch_available():
244
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
245
+ else:
246
+ import torch
247
+
248
+ batch, seqlen = common_inputs["input_ids"].shape
249
+ # Not using the same length for past_key_values
250
+ past_key_values_length = seqlen + 2
251
+ past_shape = (
252
+ batch,
253
+ self.num_attention_heads,
254
+ past_key_values_length,
255
+ self._config.hidden_size // self.num_attention_heads,
256
+ )
257
+ ordered_inputs["past_key_values"] = [
258
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
259
+ ]
260
+
261
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
262
+ if self.use_past:
263
+ mask_dtype = ordered_inputs["attention_mask"].dtype
264
+ ordered_inputs["attention_mask"] = torch.cat(
265
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
266
+ )
267
+
268
+ return ordered_inputs
269
+
270
+ @property
271
+ def default_onnx_opset(self) -> int:
272
+ return 13
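The `past_key_values` entries produced by `generate_dummy_inputs` have shape `(batch, n_head, past_sequence_length, head_dim)`. A small sketch that reproduces just that shape computation from a default `GPT2Config`, without going through a tokenizer:

```python
import torch
from transformers import GPT2Config

config = GPT2Config()
batch, seqlen = 2, 8
# Mirrors the shape used above: the cache length is deliberately seqlen + 2.
past_key_values_length = seqlen + 2
past_shape = (
    batch,
    config.n_head,
    past_key_values_length,
    config.hidden_size // config.n_head,
)
past_key_values = [
    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(config.n_layer)
]
print(past_shape)  # (2, 12, 10, 64)
```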
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,69 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert OpenAI GPT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
23
+ from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
30
+ # Construct model
31
+ if gpt2_config_file == "":
32
+ config = GPT2Config()
33
+ else:
34
+ config = GPT2Config.from_json_file(gpt2_config_file)
35
+ model = GPT2Model(config)
36
+
37
+ # Load weights from numpy
38
+ load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
39
+
40
+ # Save pytorch-model
41
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
42
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
43
+ print(f"Save PyTorch model to {pytorch_weights_dump_path}")
44
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
45
+ print(f"Save configuration file to {pytorch_config_dump_path}")
46
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
47
+ f.write(config.to_json_string())
48
+
49
+
50
+ if __name__ == "__main__":
51
+ parser = argparse.ArgumentParser()
52
+ # Required parameters
53
+ parser.add_argument(
54
+ "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ parser.add_argument(
60
+ "--gpt2_config_file",
61
+ default="",
62
+ type=str,
63
+ help=(
64
+ "An optional config json file corresponding to the pre-trained OpenAI model. \n"
65
+ "This specifies the model architecture."
66
+ ),
67
+ )
68
+ args = parser.parse_args()
69
+ convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
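The script is meant to be driven from the command line, but the conversion function can also be called directly. A hedged sketch with placeholder paths (the paths below are hypothetical and must point at a real TensorFlow checkpoint and an existing output directory):

```python
from transformers.models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
    convert_gpt2_checkpoint_to_pytorch,
)

# Hypothetical paths for illustration only.
convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="/path/to/tf_checkpoint",
    gpt2_config_file="",  # empty string -> default GPT2Config()
    pytorch_dump_folder_path="/path/to/output_dir",
)
```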
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/modeling_flax_gpt2.py ADDED
@@ -0,0 +1,779 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Any, Optional, Tuple
17
+
18
+ import flax.linen as nn
19
+ import jax
20
+ import jax.numpy as jnp
21
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
22
+ from flax.linen import combine_masks, make_causal_mask
23
+ from flax.linen.attention import dot_product_attention_weights
24
+ from flax.traverse_util import flatten_dict, unflatten_dict
25
+ from jax import lax
26
+
27
+ from ...modeling_flax_outputs import (
28
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
29
+ FlaxCausalLMOutputWithCrossAttentions,
30
+ )
31
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
32
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
33
+ from .configuration_gpt2 import GPT2Config
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ _CHECKPOINT_FOR_DOC = "openai-community/gpt2"
39
+ _CONFIG_FOR_DOC = "GPT2Config"
40
+
41
+
42
+ GPT2_START_DOCSTRING = r"""
43
+
44
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
45
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
46
+ etc.)
47
+
48
+ This model is also a Flax Linen
49
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
50
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
51
+
52
+ Finally, this model supports inherent JAX features such as:
53
+
54
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
55
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
56
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
57
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
58
+
59
+ Parameters:
60
+ config ([`GPT2Config`]): Model configuration class with all the parameters of the model.
61
+ Initializing with a config file does not load the weights associated with the model, only the
62
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
63
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
64
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
65
+ `jax.numpy.bfloat16` (on TPUs).
66
+
67
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
68
+ specified all the computation will be performed with the given `dtype`.
69
+
70
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
71
+ parameters.**
72
+
73
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
74
+ [`~FlaxPreTrainedModel.to_bf16`].
75
+ """
76
+
77
+ GPT2_INPUTS_DOCSTRING = r"""
78
+ Args:
79
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
80
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
81
+
82
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
83
+ [`PreTrainedTokenizer.__call__`] for details.
84
+
85
+ [What are input IDs?](../glossary#input-ids)
86
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
87
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
88
+
89
+ - 1 for tokens that are **not masked**,
90
+ - 0 for tokens that are **masked**.
91
+
92
+ [What are attention masks?](../glossary#attention-mask)
93
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
94
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
95
+ config.max_position_embeddings - 1]`.
96
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
97
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
98
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
99
+ output_attentions (`bool`, *optional*):
100
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
101
+ tensors for more detail.
102
+ output_hidden_states (`bool`, *optional*):
103
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
104
+ more detail.
105
+ return_dict (`bool`, *optional*):
106
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
107
+ """
108
+
109
+
110
+ class FlaxConv1D(nn.Module):
111
+ features: int
112
+ use_bias: bool = True
113
+ dtype: Any = jnp.float32
114
+ precision: Any = None
115
+
116
+ @nn.compact
117
+ def __call__(self, inputs):
118
+ inputs = jnp.asarray(inputs, self.dtype)
119
+ kernel = self.param("kernel", jax.nn.initializers.normal(stddev=0.02), (self.features, inputs.shape[-1]))
120
+ kernel = jnp.asarray(kernel.transpose(), self.dtype)
121
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())), precision=self.precision)
122
+ if self.use_bias:
123
+ bias = self.param("bias", jax.nn.initializers.zeros, (self.features,))
124
+ bias = jnp.asarray(bias, self.dtype)
125
+ y = y + bias
126
+ return y
127
+
128
+
129
+ class FlaxGPT2Attention(nn.Module):
130
+ config: GPT2Config
131
+ dtype: jnp.dtype = jnp.float32
132
+ causal: bool = True
133
+ is_cross_attention: bool = False
134
+
135
+ def setup(self):
136
+ config = self.config
137
+ self.embed_dim = config.hidden_size
138
+ self.num_heads = config.num_attention_heads
139
+ self.head_dim = self.embed_dim // self.num_heads
140
+
141
+ if self.is_cross_attention:
142
+ self.c_attn = FlaxConv1D(2 * self.embed_dim, dtype=self.dtype)
143
+ self.q_attn = FlaxConv1D(self.embed_dim, dtype=self.dtype)
144
+ else:
145
+ self.c_attn = FlaxConv1D(3 * self.embed_dim, dtype=self.dtype)
146
+ self.c_proj = FlaxConv1D(self.embed_dim, dtype=self.dtype)
147
+
148
+ self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
149
+
150
+ if self.causal:
151
+ self.causal_mask = make_causal_mask(
152
+ jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool"
153
+ )
154
+
155
+ def _split_heads(self, hidden_states):
156
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
157
+
158
+ def _merge_heads(self, hidden_states):
159
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
160
+
161
+ @nn.compact
162
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
163
+ """
164
+ This function takes projected key, value states from a single input token and concatenates the states to cached
165
+ states from previous steps. This function is slighly adapted from the official Flax repository:
166
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
167
+ """
168
+ # detect if we're initializing by absence of existing cache data.
169
+ is_initialized = self.has_variable("cache", "cached_key")
170
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
171
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
172
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
173
+
174
+ if is_initialized:
175
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
176
+ # update key, value caches with our new 1d spatial slices
177
+ cur_index = cache_index.value
178
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
179
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
180
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
181
+ cached_key.value = key
182
+ cached_value.value = value
183
+ num_updated_cache_vectors = query.shape[1]
184
+ cache_index.value = cache_index.value + num_updated_cache_vectors
185
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
186
+ pad_mask = jnp.broadcast_to(
187
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
188
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
189
+ )
190
+ attention_mask = combine_masks(pad_mask, attention_mask)
191
+ return key, value, attention_mask
192
+
193
+ def __call__(
194
+ self,
195
+ hidden_states,
196
+ key_value_states: Optional[jnp.ndarray] = None,
197
+ attention_mask=None,
198
+ deterministic: bool = True,
199
+ init_cache: bool = False,
200
+ output_attentions: bool = False,
201
+ ):
202
+ # if key_value_states are provided this layer is used as a cross-attention layer
203
+ # for the decoder
204
+ is_cross_attention = key_value_states is not None
205
+ batch_size = hidden_states.shape[0]
206
+
207
+ if not is_cross_attention:
208
+ qkv_out = self.c_attn(hidden_states)
209
+ query, key, value = jnp.split(qkv_out, 3, axis=2)
210
+ else:
211
+ q_out = self.q_attn(hidden_states)
212
+ (query,) = jnp.split(q_out, 1, axis=2)
213
+ kv_out = self.c_attn(key_value_states)
214
+ key, value = jnp.split(kv_out, 2, axis=2)
215
+
216
+ query = self._split_heads(query)
217
+ key = self._split_heads(key)
218
+ value = self._split_heads(value)
219
+
220
+ query_length, key_length = query.shape[1], key.shape[1]
221
+
222
+ if self.causal:
223
+ if self.has_variable("cache", "cached_key"):
224
+ mask_shift = self.variables["cache"]["cache_index"]
225
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
226
+ causal_mask = lax.dynamic_slice(
227
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
228
+ )
229
+ else:
230
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
231
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
232
+
233
+ # combine masks if needed
234
+ if attention_mask is not None and self.causal:
235
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
236
+ attention_mask = combine_masks(attention_mask, causal_mask)
237
+ elif self.causal:
238
+ attention_mask = causal_mask
239
+ elif attention_mask is not None:
240
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
241
+
242
+ dropout_rng = None
243
+ if not deterministic and self.config.attn_pdrop > 0.0:
244
+ dropout_rng = self.make_rng("dropout")
245
+
246
+ # During fast autoregressive decoding, we feed one position at a time,
247
+ # and cache the keys and values step by step.
248
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
249
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
250
+
251
+ # transform boolean mask into float mask
252
+ if attention_mask is not None:
253
+ attention_bias = lax.select(
254
+ attention_mask > 0,
255
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
256
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
257
+ )
258
+ else:
259
+ attention_bias = None
260
+
261
+ # usual dot product attention
262
+ attn_weights = dot_product_attention_weights(
263
+ query,
264
+ key,
265
+ bias=attention_bias,
266
+ dropout_rng=dropout_rng,
267
+ dropout_rate=self.config.attn_pdrop,
268
+ deterministic=deterministic,
269
+ dtype=self.dtype,
270
+ precision=None,
271
+ )
272
+
273
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
274
+ attn_output = self._merge_heads(attn_output)
275
+ attn_output = self.c_proj(attn_output)
276
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
277
+
278
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
279
+ return outputs
280
+
281
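# Illustrative sketch: a minimal, self-contained view of the cached-decoding mask
# logic used by FlaxGPT2Attention above. At decoding step `cur_index` the single
# query position may only attend to key positions <= cur_index. The sizes and the
# helper name below are assumptions chosen for illustration, not part of the model.
def _example_cached_causal_mask():
    import jax.numpy as jnp
    from jax import lax

    max_length = 8  # assumed cache length
    cur_index = 3   # assumed current decoding step
    # full (1, 1, max_length, max_length) lower-triangular causal mask
    causal_mask = jnp.tril(jnp.ones((1, 1, max_length, max_length), dtype=bool))
    # slice out the row for the current query position, mirroring the
    # lax.dynamic_slice call in __call__ once the cache is populated
    step_mask = lax.dynamic_slice(causal_mask, (0, 0, cur_index, 0), (1, 1, 1, max_length))
    return step_mask  # True for key positions 0..cur_index, False afterwards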
+
282
+ class FlaxGPT2MLP(nn.Module):
283
+ config: GPT2Config
284
+ intermediate_size: int
285
+ dtype: jnp.dtype = jnp.float32
286
+
287
+ def setup(self):
288
+ embed_dim = self.config.hidden_size
289
+ self.c_fc = FlaxConv1D(self.intermediate_size, dtype=self.dtype)
290
+ self.c_proj = FlaxConv1D(embed_dim, dtype=self.dtype)
291
+ self.act = ACT2FN[self.config.activation_function]
292
+ self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
293
+
294
+ def __call__(self, hidden_states, deterministic: bool = True):
295
+ hidden_states = self.c_fc(hidden_states)
296
+ hidden_states = self.act(hidden_states)
297
+ hidden_states = self.c_proj(hidden_states)
298
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
299
+ return hidden_states
300
+
301
+
302
+ class FlaxGPT2Block(nn.Module):
303
+ config: GPT2Config
304
+ dtype: jnp.dtype = jnp.float32
305
+
306
+ def setup(self):
307
+ hidden_size = self.config.hidden_size
308
+ inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
309
+
310
+ self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
311
+ self.attn = FlaxGPT2Attention(self.config, dtype=self.dtype)
312
+ self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
313
+
314
+ if self.config.add_cross_attention:
315
+ self.crossattention = FlaxGPT2Attention(
316
+ config=self.config, dtype=self.dtype, causal=False, is_cross_attention=True
317
+ )
318
+ self.ln_cross_attn = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
319
+
320
+ self.mlp = FlaxGPT2MLP(self.config, inner_dim, dtype=self.dtype)
321
+
322
+ def __call__(
323
+ self,
324
+ hidden_states,
325
+ attention_mask=None,
326
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
327
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
328
+ deterministic: bool = True,
329
+ init_cache: bool = False,
330
+ output_attentions: bool = False,
331
+ ):
332
+ residual = hidden_states
333
+ hidden_states = self.ln_1(hidden_states)
334
+ attn_outputs = self.attn(
335
+ hidden_states,
336
+ attention_mask=attention_mask,
337
+ deterministic=deterministic,
338
+ init_cache=init_cache,
339
+ output_attentions=output_attentions,
340
+ )
341
+ # residual connection
342
+ attn_output = attn_outputs[0] # output_attn: a, (attentions)
343
+ outputs = attn_outputs[1:]
344
+ # residual connection
345
+ hidden_states = attn_output + residual
346
+
347
+ # Cross-Attention Block
348
+ if encoder_hidden_states is not None:
349
+ # add one self-attention block for cross-attention
350
+ if not hasattr(self, "crossattention"):
351
+ raise ValueError(
352
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
353
+ "cross-attention layers by setting `config.add_cross_attention=True`"
354
+ )
355
+ residual = hidden_states
356
+ hidden_states = self.ln_cross_attn(hidden_states)
357
+ cross_attn_outputs = self.crossattention(
358
+ hidden_states,
359
+ key_value_states=encoder_hidden_states,
360
+ attention_mask=encoder_attention_mask,
361
+ deterministic=deterministic,
362
+ output_attentions=output_attentions,
363
+ )
364
+ attn_output = cross_attn_outputs[0]
365
+ # residual connection
366
+ hidden_states = residual + attn_output
367
+ outputs = outputs + cross_attn_outputs[1:] # add cross attentions if we output attention weights
368
+
369
+ residual = hidden_states
370
+ hidden_states = self.ln_2(hidden_states)
371
+ feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
372
+ # residual connection
373
+ hidden_states = residual + feed_forward_hidden_states
374
+
375
+ outputs = (hidden_states,) + outputs
376
+
377
+ return outputs
378
+
379
+
380
+ class FlaxGPT2PreTrainedModel(FlaxPreTrainedModel):
381
+ """
382
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
383
+ models.
384
+ """
385
+
386
+ config_class = GPT2Config
387
+ base_model_prefix = "transformer"
388
+ module_class: nn.Module = None
389
+
390
+ def __init__(
391
+ self,
392
+ config: GPT2Config,
393
+ input_shape: Tuple = (1, 1),
394
+ seed: int = 0,
395
+ dtype: jnp.dtype = jnp.float32,
396
+ _do_init: bool = True,
397
+ **kwargs,
398
+ ):
399
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
400
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
401
+
402
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
403
+ # init input tensors
404
+ input_ids = jnp.zeros(input_shape, dtype="i4")
405
+ attention_mask = jnp.ones_like(input_ids)
406
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
407
+ params_rng, dropout_rng = jax.random.split(rng)
408
+ rngs = {"params": params_rng, "dropout": dropout_rng}
409
+
410
+ if self.config.add_cross_attention:
411
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
412
+ encoder_attention_mask = attention_mask
413
+ module_init_outputs = self.module.init(
414
+ rngs,
415
+ input_ids,
416
+ attention_mask,
417
+ position_ids,
418
+ encoder_hidden_states,
419
+ encoder_attention_mask,
420
+ return_dict=False,
421
+ )
422
+ else:
423
+ module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
424
+
425
+ random_params = module_init_outputs["params"]
426
+
427
+ if params is not None:
428
+ random_params = flatten_dict(unfreeze(random_params))
429
+ params = flatten_dict(unfreeze(params))
430
+ for missing_key in self._missing_keys:
431
+ params[missing_key] = random_params[missing_key]
432
+ self._missing_keys = set()
433
+ return freeze(unflatten_dict(params))
434
+ else:
435
+ return random_params
436
+
437
+ def init_cache(self, batch_size, max_length):
438
+ r"""
439
+ Args:
440
+ batch_size (`int`):
441
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
442
+ max_length (`int`):
443
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
444
+ cache.
445
+ """
446
+ # init input variables to retrieve cache
447
+ input_ids = jnp.ones((batch_size, max_length))
448
+ attention_mask = jnp.ones_like(input_ids)
449
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
450
+
451
+ init_variables = self.module.init(
452
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
453
+ )
454
+ return unfreeze(init_variables["cache"])
455
+
456
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
457
+ def __call__(
458
+ self,
459
+ input_ids,
460
+ attention_mask=None,
461
+ position_ids=None,
462
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
463
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
464
+ params: dict = None,
465
+ past_key_values: dict = None,
466
+ dropout_rng: jax.random.PRNGKey = None,
467
+ train: bool = False,
468
+ output_attentions: Optional[bool] = None,
469
+ output_hidden_states: Optional[bool] = None,
470
+ return_dict: Optional[bool] = None,
471
+ ):
472
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
473
+ output_hidden_states = (
474
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
475
+ )
476
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
477
+
478
+ if encoder_hidden_states is not None and encoder_attention_mask is None:
479
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
480
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
481
+
482
+ batch_size, sequence_length = input_ids.shape
483
+
484
+ if position_ids is None:
485
+ if past_key_values is not None:
486
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
487
+
488
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
489
+
490
+ if attention_mask is None:
491
+ attention_mask = jnp.ones((batch_size, sequence_length))
492
+
493
+ # Handle any PRNG if needed
494
+ rngs = {}
495
+ if dropout_rng is not None:
496
+ rngs["dropout"] = dropout_rng
497
+
498
+ inputs = {"params": params or self.params}
499
+
500
+ # if past_key_values are passed, the cache is already initialized; a private flag init_cache has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be changed by the FlaxGPT2Attention module.
501
+ if past_key_values:
502
+ inputs["cache"] = past_key_values
503
+ mutable = ["cache"]
504
+ else:
505
+ mutable = False
506
+
507
+ outputs = self.module.apply(
508
+ inputs,
509
+ jnp.array(input_ids, dtype="i4"),
510
+ jnp.array(attention_mask, dtype="i4"),
511
+ jnp.array(position_ids, dtype="i4"),
512
+ encoder_hidden_states,
513
+ encoder_attention_mask,
514
+ not train,
515
+ False,
516
+ output_attentions,
517
+ output_hidden_states,
518
+ return_dict,
519
+ rngs=rngs,
520
+ mutable=mutable,
521
+ )
522
+
523
+ # add updated cache to model output
524
+ if past_key_values is not None and return_dict:
525
+ outputs, past_key_values = outputs
526
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
527
+ return outputs
528
+ elif past_key_values is not None and not return_dict:
529
+ outputs, past_key_values = outputs
530
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
531
+
532
+ return outputs
533
+
534
+
535
+ class FlaxGPT2BlockCollection(nn.Module):
536
+ config: GPT2Config
537
+ dtype: jnp.dtype = jnp.float32
538
+
539
+ def setup(self):
540
+ self.blocks = [
541
+ FlaxGPT2Block(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
542
+ ]
543
+
544
+ def __call__(
545
+ self,
546
+ hidden_states,
547
+ attention_mask=None,
548
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
549
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
550
+ deterministic: bool = True,
551
+ init_cache: bool = False,
552
+ output_attentions: bool = False,
553
+ output_hidden_states: bool = False,
554
+ return_dict: bool = True,
555
+ ):
556
+ all_attentions = () if output_attentions else None
557
+ all_hidden_states = () if output_hidden_states else None
558
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
559
+
560
+ for block in self.blocks:
561
+ if output_hidden_states:
562
+ all_hidden_states += (hidden_states,)
563
+
564
+ layer_outputs = block(
565
+ hidden_states,
566
+ attention_mask,
567
+ encoder_hidden_states=encoder_hidden_states,
568
+ encoder_attention_mask=encoder_attention_mask,
569
+ deterministic=deterministic,
570
+ init_cache=init_cache,
571
+ output_attentions=output_attentions,
572
+ )
573
+ hidden_states = layer_outputs[0]
574
+
575
+ if output_attentions:
576
+ all_attentions += (layer_outputs[1],)
577
+
578
+ if encoder_hidden_states is not None:
579
+ all_cross_attentions += (layer_outputs[2],)
580
+
581
+ # this contains possible `None` values - `FlaxGPT2Module` will filter them out
582
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
583
+
584
+ return outputs
585
+
586
+
587
+ class FlaxGPT2Module(nn.Module):
588
+ config: GPT2Config
589
+ dtype: jnp.dtype = jnp.float32
590
+
591
+ def setup(self):
592
+ self.embed_dim = self.config.hidden_size
593
+
594
+ self.wte = nn.Embed(
595
+ self.config.vocab_size,
596
+ self.embed_dim,
597
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
598
+ dtype=self.dtype,
599
+ )
600
+ self.wpe = nn.Embed(
601
+ self.config.max_position_embeddings,
602
+ self.embed_dim,
603
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
604
+ dtype=self.dtype,
605
+ )
606
+ self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
607
+ self.h = FlaxGPT2BlockCollection(self.config, dtype=self.dtype)
608
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
609
+
610
+ def __call__(
611
+ self,
612
+ input_ids,
613
+ attention_mask,
614
+ position_ids,
615
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
616
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
617
+ deterministic=True,
618
+ init_cache: bool = False,
619
+ output_attentions: bool = False,
620
+ output_hidden_states: bool = False,
621
+ return_dict: bool = True,
622
+ ):
623
+ input_embeds = self.wte(input_ids.astype("i4"))
624
+ position_embeds = self.wpe(position_ids.astype("i4"))
625
+
626
+ hidden_states = input_embeds + position_embeds
627
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
628
+
629
+ outputs = self.h(
630
+ hidden_states,
631
+ attention_mask,
632
+ encoder_hidden_states,
633
+ encoder_attention_mask,
634
+ deterministic=deterministic,
635
+ init_cache=init_cache,
636
+ output_attentions=output_attentions,
637
+ output_hidden_states=output_hidden_states,
638
+ return_dict=return_dict,
639
+ )
640
+
641
+ hidden_states = outputs[0]
642
+ hidden_states = self.ln_f(hidden_states)
643
+
644
+ if output_hidden_states:
645
+ all_hidden_states = outputs[1] + (hidden_states,)
646
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
647
+ else:
648
+ outputs = (hidden_states,) + outputs[1:]
649
+
650
+ if not return_dict:
651
+ return tuple(v for v in outputs if v is not None)
652
+
653
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
654
+ last_hidden_state=hidden_states,
655
+ hidden_states=outputs[1],
656
+ attentions=outputs[2],
657
+ cross_attentions=outputs[3],
658
+ )
659
+
660
+
661
+ @add_start_docstrings(
662
+ "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
663
+ GPT2_START_DOCSTRING,
664
+ )
665
+ class FlaxGPT2Model(FlaxGPT2PreTrainedModel):
666
+ module_class = FlaxGPT2Module
667
+
668
+
669
+ append_call_sample_docstring(
670
+ FlaxGPT2Model,
671
+ _CHECKPOINT_FOR_DOC,
672
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
673
+ _CONFIG_FOR_DOC,
674
+ )
675
+
676
+
677
+ class FlaxGPT2LMHeadModule(nn.Module):
678
+ config: GPT2Config
679
+ dtype: jnp.dtype = jnp.float32
680
+
681
+ def setup(self):
682
+ self.transformer = FlaxGPT2Module(self.config, dtype=self.dtype)
683
+ self.lm_head = nn.Dense(
684
+ self.config.vocab_size,
685
+ use_bias=False,
686
+ dtype=self.dtype,
687
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
688
+ )
689
+
690
+ def __call__(
691
+ self,
692
+ input_ids,
693
+ attention_mask,
694
+ position_ids,
695
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
696
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
697
+ deterministic: bool = True,
698
+ init_cache: bool = False,
699
+ output_attentions: bool = False,
700
+ output_hidden_states: bool = False,
701
+ return_dict: bool = True,
702
+ ):
703
+ outputs = self.transformer(
704
+ input_ids,
705
+ attention_mask,
706
+ position_ids,
707
+ encoder_hidden_states,
708
+ encoder_attention_mask,
709
+ deterministic=deterministic,
710
+ init_cache=init_cache,
711
+ output_attentions=output_attentions,
712
+ output_hidden_states=output_hidden_states,
713
+ return_dict=return_dict,
714
+ )
715
+
716
+ hidden_states = outputs[0]
717
+
718
+ if self.config.tie_word_embeddings:
719
+ shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
720
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
721
+ else:
722
+ lm_logits = self.lm_head(hidden_states)
723
+
724
+ if not return_dict:
725
+ return (lm_logits,) + outputs[1:]
726
+
727
+ return FlaxCausalLMOutputWithCrossAttentions(
728
+ logits=lm_logits,
729
+ hidden_states=outputs.hidden_states,
730
+ attentions=outputs.attentions,
731
+ cross_attentions=outputs.cross_attentions,
732
+ )
733
+
734
+
735
+ @add_start_docstrings(
736
+ """
737
+ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
738
+ embeddings).
739
+ """,
740
+ GPT2_START_DOCSTRING,
741
+ )
742
+ class FlaxGPT2LMHeadModel(FlaxGPT2PreTrainedModel):
743
+ module_class = FlaxGPT2LMHeadModule
744
+
745
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
746
+ # initializing the cache
747
+ batch_size, seq_length = input_ids.shape
748
+
749
+ past_key_values = self.init_cache(batch_size, max_length)
750
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
751
+ # But since GPT2 uses a causal mask, those positions are masked anyway.
752
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation.
753
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
754
+ if attention_mask is not None:
755
+ position_ids = attention_mask.cumsum(axis=-1) - 1
756
+ extended_attention_mask = lax.dynamic_update_slice(
757
+ extended_attention_mask, attention_mask.astype("i4"), (0, 0)
758
+ )
759
+ else:
760
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
761
+
762
+ return {
763
+ "past_key_values": past_key_values,
764
+ "attention_mask": extended_attention_mask,
765
+ "position_ids": position_ids,
766
+ }
767
+
768
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
769
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
770
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
771
+ return model_kwargs
772
+
773
+
774
+ append_call_sample_docstring(
775
+ FlaxGPT2LMHeadModel,
776
+ _CHECKPOINT_FOR_DOC,
777
+ FlaxCausalLMOutputWithCrossAttentions,
778
+ _CONFIG_FOR_DOC,
779
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py ADDED
@@ -0,0 +1,1944 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch OpenAI GPT-2 model."""
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.cuda.amp import autocast
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import ACT2FN
32
+ from ...modeling_outputs import (
33
+ BaseModelOutputWithPastAndCrossAttentions,
34
+ CausalLMOutputWithCrossAttentions,
35
+ QuestionAnsweringModelOutput,
36
+ SequenceClassifierOutputWithPast,
37
+ TokenClassifierOutput,
38
+ )
39
+ from ...modeling_utils import PreTrainedModel, SequenceSummary
40
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
41
+ from ...utils import (
42
+ ModelOutput,
43
+ add_code_sample_docstrings,
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ is_flash_attn_2_available,
47
+ is_flash_attn_greater_or_equal_2_10,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from ...utils.model_parallel_utils import assert_device_map, get_device_map
52
+ from .configuration_gpt2 import GPT2Config
53
+
54
+
55
+ if is_flash_attn_2_available():
56
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
57
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CHECKPOINT_FOR_DOC = "openai-community/gpt2"
63
+ _CONFIG_FOR_DOC = "GPT2Config"
64
+
65
+
66
+ from ..deprecated._archive_maps import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
70
+ def _get_unpad_data(attention_mask):
71
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
72
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
73
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
74
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
75
+ return (
76
+ indices,
77
+ cu_seqlens,
78
+ max_seqlen_in_batch,
79
+ )
80
+
81
+
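# Illustrative sketch: a toy check of what `_get_unpad_data` above returns for a
# small padded batch. The example mask is an assumption chosen for illustration.
def _example_get_unpad_data():
    import torch

    # two sequences: the first has one padding position, the second has none
    attention_mask = torch.tensor([[1, 1, 0], [1, 1, 1]], dtype=torch.int32)
    indices, cu_seqlens, max_seqlen = _get_unpad_data(attention_mask)
    # indices     -> flattened positions of real tokens: tensor([0, 1, 3, 4, 5])
    # cu_seqlens  -> cumulative sequence lengths: tensor([0, 2, 5], dtype=torch.int32)
    # max_seqlen  -> 3
    return indices, cu_seqlens, max_seqlen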
82
+ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
83
+ """Load tf checkpoints in a pytorch model"""
84
+ try:
85
+ import re
86
+
87
+ import tensorflow as tf
88
+ except ImportError:
89
+ logger.error(
90
+ "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
91
+ "https://www.tensorflow.org/install/ for installation instructions."
92
+ )
93
+ raise
94
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
95
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
96
+ # Load weights from TF model
97
+ init_vars = tf.train.list_variables(tf_path)
98
+ names = []
99
+ arrays = []
100
+ for name, shape in init_vars:
101
+ logger.info(f"Loading TF weight {name} with shape {shape}")
102
+ array = tf.train.load_variable(tf_path, name)
103
+ names.append(name)
104
+ arrays.append(array.squeeze())
105
+
106
+ for name, array in zip(names, arrays):
107
+ name = name[6:] # skip "model/"
108
+ name = name.split("/")
109
+ pointer = model
110
+ for m_name in name:
111
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
112
+ scope_names = re.split(r"(\d+)", m_name)
113
+ else:
114
+ scope_names = [m_name]
115
+ if scope_names[0] == "w" or scope_names[0] == "g":
116
+ pointer = getattr(pointer, "weight")
117
+ elif scope_names[0] == "b":
118
+ pointer = getattr(pointer, "bias")
119
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
120
+ pointer = getattr(pointer, scope_names[0])
121
+ pointer = getattr(pointer, "weight")
122
+ else:
123
+ pointer = getattr(pointer, scope_names[0])
124
+ if len(scope_names) >= 2:
125
+ num = int(scope_names[1])
126
+ pointer = pointer[num]
127
+ try:
128
+ if pointer.shape != array.shape:
129
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
130
+ except ValueError as e:
131
+ e.args += (pointer.shape, array.shape)
132
+ raise
133
+ logger.info(f"Initialize PyTorch weight {name}")
134
+ pointer.data = torch.from_numpy(array)
135
+ return model
136
+
137
+
138
+ class GPT2Attention(nn.Module):
139
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
140
+ super().__init__()
141
+ self.config = config
142
+ max_positions = config.max_position_embeddings
143
+ self.register_buffer(
144
+ "bias",
145
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
146
+ 1, 1, max_positions, max_positions
147
+ ),
148
+ persistent=False,
149
+ )
150
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
151
+
152
+ self.embed_dim = config.hidden_size
153
+ self.num_heads = config.num_attention_heads
154
+ self.head_dim = self.embed_dim // self.num_heads
155
+ self.split_size = self.embed_dim
156
+ if self.head_dim * self.num_heads != self.embed_dim:
157
+ raise ValueError(
158
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
159
+ f" {self.num_heads})."
160
+ )
161
+
162
+ self.scale_attn_weights = config.scale_attn_weights
163
+ self.is_cross_attention = is_cross_attention
164
+
165
+ # Layer-wise attention scaling, reordering, and upcasting
166
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
167
+ self.layer_idx = layer_idx
168
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
169
+
170
+ if self.is_cross_attention:
171
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
172
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
173
+ else:
174
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
175
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
176
+
177
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
178
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
179
+ self.is_causal = True
180
+
181
+ self.pruned_heads = set()
182
+
183
+ def prune_heads(self, heads):
184
+ if len(heads) == 0:
185
+ return
186
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
187
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
188
+
189
+ # Prune conv1d layers
190
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
191
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
192
+
193
+ # Update hyper params
194
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
195
+ self.num_heads = self.num_heads - len(heads)
196
+ self.pruned_heads = self.pruned_heads.union(heads)
197
+
198
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
199
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
200
+
201
+ if self.scale_attn_weights:
202
+ attn_weights = attn_weights / torch.full(
203
+ [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
204
+ )
205
+
206
+ # Layer-wise attention scaling
207
+ if self.scale_attn_by_inverse_layer_idx:
208
+ attn_weights = attn_weights / float(self.layer_idx + 1)
209
+
210
+ if not self.is_cross_attention:
211
+ # only the "normal" (non-cross) attention layer implements the causal mask
212
+ query_length, key_length = query.size(-2), key.size(-2)
213
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
214
+ mask_value = torch.finfo(attn_weights.dtype).min
215
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
216
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
217
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
218
+ attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
219
+
220
+ if attention_mask is not None:
221
+ # Apply the attention mask
222
+ attn_weights = attn_weights + attention_mask
223
+
224
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
225
+
226
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
227
+ attn_weights = attn_weights.type(value.dtype)
228
+ attn_weights = self.attn_dropout(attn_weights)
229
+
230
+ # Mask heads if we want to
231
+ if head_mask is not None:
232
+ attn_weights = attn_weights * head_mask
233
+
234
+ attn_output = torch.matmul(attn_weights, value)
235
+
236
+ return attn_output, attn_weights
237
+
238
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
239
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
240
+ bsz, num_heads, q_seq_len, dk = query.size()
241
+ _, _, k_seq_len, _ = key.size()
242
+
243
+ # Preallocate attn_weights for `baddbmm`
244
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
245
+
246
+ # Compute Scale Factor
247
+ scale_factor = 1.0
248
+ if self.scale_attn_weights:
249
+ scale_factor /= float(value.size(-1)) ** 0.5
250
+
251
+ if self.scale_attn_by_inverse_layer_idx:
252
+ scale_factor /= float(self.layer_idx + 1)
253
+
254
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
255
+ with autocast(enabled=False):
256
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
257
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
258
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
259
+
260
+ if not self.is_cross_attention:
261
+ # only the "normal" (non-cross) attention layer implements the causal mask
262
+ query_length, key_length = query.size(-2), key.size(-2)
263
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
264
+ mask_value = torch.finfo(attn_weights.dtype).min
265
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
266
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
267
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
268
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
269
+
270
+ if attention_mask is not None:
271
+ # Apply the attention mask
272
+ attn_weights = attn_weights + attention_mask
273
+
274
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
275
+
276
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
277
+ if attn_weights.dtype != torch.float32:
278
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
279
+ attn_weights = attn_weights.type(value.dtype)
280
+ attn_weights = self.attn_dropout(attn_weights)
281
+
282
+ # Mask heads if we want to
283
+ if head_mask is not None:
284
+ attn_weights = attn_weights * head_mask
285
+
286
+ attn_output = torch.matmul(attn_weights, value)
287
+
288
+ return attn_output, attn_weights
289
+
290
+ def _split_heads(self, tensor, num_heads, attn_head_size):
291
+ """
292
+ Splits hidden_size dim into attn_head_size and num_heads
293
+ """
294
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
295
+ tensor = tensor.view(new_shape)
296
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
297
+
298
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
299
+ """
300
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
301
+ """
302
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
303
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
304
+ return tensor.view(new_shape)
305
+
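    # Shape note (for clarity; values assume the GPT-2 small config, where
    # hidden_size = 768 and num_heads = 12): _split_heads maps
    # (batch, seq_len, 768) -> (batch, 12, seq_len, 64), and _merge_heads inverts it.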
306
+ def forward(
307
+ self,
308
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
309
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
310
+ attention_mask: Optional[torch.FloatTensor] = None,
311
+ head_mask: Optional[torch.FloatTensor] = None,
312
+ encoder_hidden_states: Optional[torch.Tensor] = None,
313
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
314
+ use_cache: Optional[bool] = False,
315
+ output_attentions: Optional[bool] = False,
316
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
317
+ if encoder_hidden_states is not None:
318
+ if not hasattr(self, "q_attn"):
319
+ raise ValueError(
320
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
321
+ "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
322
+ )
323
+
324
+ query = self.q_attn(hidden_states)
325
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
326
+ attention_mask = encoder_attention_mask
327
+ else:
328
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
329
+
330
+ query = self._split_heads(query, self.num_heads, self.head_dim)
331
+ key = self._split_heads(key, self.num_heads, self.head_dim)
332
+ value = self._split_heads(value, self.num_heads, self.head_dim)
333
+
334
+ if layer_past is not None:
335
+ past_key, past_value = layer_past
336
+ key = torch.cat((past_key, key), dim=-2)
337
+ value = torch.cat((past_value, value), dim=-2)
338
+
339
+ if use_cache is True:
340
+ present = (key, value)
341
+ else:
342
+ present = None
343
+
344
+ if self.reorder_and_upcast_attn:
345
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
346
+ else:
347
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
348
+
349
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
350
+ attn_output = self.c_proj(attn_output)
351
+ attn_output = self.resid_dropout(attn_output)
352
+
353
+ outputs = (attn_output, present)
354
+ if output_attentions:
355
+ outputs += (attn_weights,)
356
+
357
+ return outputs # a, present, (attentions)
358
+
359
+
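# Illustrative sketch: a toy reproduction of the masked scaled-dot-product step in
# GPT2Attention._attn above, on random tensors. All shapes here are assumptions
# chosen for illustration.
def _example_eager_attention():
    import torch

    bsz, num_heads, q_len, head_dim = 2, 12, 5, 64
    query = torch.randn(bsz, num_heads, q_len, head_dim)
    key = torch.randn(bsz, num_heads, q_len, head_dim)
    value = torch.randn(bsz, num_heads, q_len, head_dim)

    # scaled attention scores: (bsz, num_heads, q_len, q_len)
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) / (head_dim**0.5)
    # lower-triangular causal mask, analogous to the registered `bias` buffer
    causal_mask = torch.tril(torch.ones(q_len, q_len, dtype=torch.bool))
    attn_weights = attn_weights.masked_fill(~causal_mask, torch.finfo(attn_weights.dtype).min)
    attn_weights = torch.softmax(attn_weights, dim=-1)
    return torch.matmul(attn_weights, value)  # (bsz, num_heads, q_len, head_dim)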
360
+ class GPT2FlashAttention2(GPT2Attention):
361
+ """
362
+ GPT2 flash attention module. This module inherits from `GPT2Attention` as the weights of the module stay
363
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
364
+ flash attention and deal with padding tokens in case the input contains any of them.
365
+ """
366
+
367
+ def __init__(self, *args, **kwargs):
368
+ super().__init__(*args, **kwargs)
369
+
370
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
371
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
372
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
373
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
374
+
375
+ def forward(
376
+ self,
377
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
378
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
379
+ attention_mask: Optional[torch.FloatTensor] = None,
380
+ head_mask: Optional[torch.FloatTensor] = None,
381
+ encoder_hidden_states: Optional[torch.Tensor] = None,
382
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
383
+ use_cache: Optional[bool] = False,
384
+ output_attentions: Optional[bool] = False,
385
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
386
+ bsz, _, _ = hidden_states.size()
387
+ if encoder_hidden_states is not None:
388
+ if not hasattr(self, "q_attn"):
389
+ raise ValueError(
390
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
391
+ "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
392
+ )
393
+
394
+ query = self.q_attn(hidden_states)
395
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
396
+ attention_mask = encoder_attention_mask
397
+ else:
398
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
399
+
400
+ query = self._split_heads(query, self.num_heads, self.head_dim)
401
+ key = self._split_heads(key, self.num_heads, self.head_dim)
402
+ value = self._split_heads(value, self.num_heads, self.head_dim)
403
+
404
+ if layer_past is not None:
405
+ past_key = layer_past[0]
406
+ past_value = layer_past[1]
407
+ key = torch.cat((past_key, key), dim=-2)
408
+ value = torch.cat((past_value, value), dim=-2)
409
+
410
+ present = None
411
+ if use_cache is True:
412
+ present = (key, value)
413
+
414
+ query_length = query.shape[2]
415
+ tgt_len = key.shape[2]
416
+
417
+ # Flash attention requires the input to have the shape
418
+ # batch_size x seq_length x num_heads x head_dim
419
+ query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim)
420
+ key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
421
+ value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
422
+
423
+ attn_dropout = self.attn_dropout.p if self.training else 0.0
424
+
425
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
426
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
427
+ # cast them back to the correct dtype just to be sure everything works as expected.
428
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
429
+ # in fp32. (LlamaRMSNorm handles it correctly)
430
+
431
+ if query.dtype == torch.float32:
432
+ if torch.is_autocast_enabled():
433
+ target_dtype = torch.get_autocast_gpu_dtype()
434
+ # Handle the case where the model is quantized
435
+ elif hasattr(self.config, "_pre_quantization_dtype"):
436
+ target_dtype = self.config._pre_quantization_dtype
437
+ else:
438
+ target_dtype = self.c_proj.weight.dtype
439
+
440
+ logger.warning_once(
441
+ f"The input hidden states seem to be silently cast to float32; this might be related to"
442
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back to"
443
+ f" {target_dtype}."
444
+ )
445
+
446
+ query = query.to(target_dtype)
447
+ key = key.to(target_dtype)
448
+ value = value.to(target_dtype)
449
+
450
+ attn_output = self._flash_attention_forward(
451
+ query, key, value, attention_mask, query_length, dropout=attn_dropout
452
+ )
453
+
454
+ attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim)
455
+ attn_output = self.c_proj(attn_weights_reshaped)
456
+ attn_output = self.resid_dropout(attn_output)
457
+
458
+ outputs = (attn_output, present)
459
+ if output_attentions:
460
+ outputs += (attn_weights_reshaped,)
461
+
462
+ return outputs
463
+
464
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
465
+ def _flash_attention_forward(
466
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
467
+ ):
468
+ """
469
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
470
+ we first unpad the input, then compute the attention scores, and finally pad the attention scores back.
471
+
472
+ Args:
473
+ query_states (`torch.Tensor`):
474
+ Input query states to be passed to Flash Attention API
475
+ key_states (`torch.Tensor`):
476
+ Input key states to be passed to Flash Attention API
477
+ value_states (`torch.Tensor`):
478
+ Input value states to be passed to Flash Attention API
479
+ attention_mask (`torch.Tensor`):
480
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
481
+ position of padding tokens and 1 for the position of non-padding tokens.
482
+ dropout (`float`):
483
+ Attention dropout
484
+ softmax_scale (`float`, *optional*):
485
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
486
+ """
487
+ if not self._flash_attn_uses_top_left_mask:
488
+ causal = self.is_causal
489
+ else:
490
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
491
+ causal = self.is_causal and query_length != 1
492
+
493
+ # Contains at least one padding token in the sequence
494
+ if attention_mask is not None:
495
+ batch_size = query_states.shape[0]
496
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
497
+ query_states, key_states, value_states, attention_mask, query_length
498
+ )
499
+
500
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
501
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
502
+
503
+ attn_output_unpad = flash_attn_varlen_func(
504
+ query_states,
505
+ key_states,
506
+ value_states,
507
+ cu_seqlens_q=cu_seqlens_q,
508
+ cu_seqlens_k=cu_seqlens_k,
509
+ max_seqlen_q=max_seqlen_in_batch_q,
510
+ max_seqlen_k=max_seqlen_in_batch_k,
511
+ dropout_p=dropout,
512
+ softmax_scale=softmax_scale,
513
+ causal=causal,
514
+ )
515
+
516
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
517
+ else:
518
+ attn_output = flash_attn_func(
519
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
520
+ )
521
+
522
+ return attn_output
523
+
524
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
525
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
526
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
527
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
528
+
529
+ key_layer = index_first_axis(
530
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
531
+ )
532
+ value_layer = index_first_axis(
533
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
534
+ )
535
+ if query_length == kv_seq_len:
536
+ query_layer = index_first_axis(
537
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
538
+ )
539
+ cu_seqlens_q = cu_seqlens_k
540
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
541
+ indices_q = indices_k
542
+ elif query_length == 1:
543
+ max_seqlen_in_batch_q = 1
544
+ cu_seqlens_q = torch.arange(
545
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
546
+ ) # There is a memcpy here, which is very bad.
547
+ indices_q = cu_seqlens_q[:-1]
548
+ query_layer = query_layer.squeeze(1)
549
+ else:
550
+ # The -q_len: slice assumes left padding.
551
+ attention_mask = attention_mask[:, -query_length:]
552
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
553
+
554
+ return (
555
+ query_layer,
556
+ key_layer,
557
+ value_layer,
558
+ indices_q,
559
+ (cu_seqlens_q, cu_seqlens_k),
560
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
561
+ )
562
+
563
+
564
+ class GPT2MLP(nn.Module):
565
+ def __init__(self, intermediate_size, config):
566
+ super().__init__()
567
+ embed_dim = config.hidden_size
568
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
569
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
570
+ self.act = ACT2FN[config.activation_function]
571
+ self.dropout = nn.Dropout(config.resid_pdrop)
572
+
573
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
574
+ hidden_states = self.c_fc(hidden_states)
575
+ hidden_states = self.act(hidden_states)
576
+ hidden_states = self.c_proj(hidden_states)
577
+ hidden_states = self.dropout(hidden_states)
578
+ return hidden_states
579
+
580
+
581
+ GPT2_ATTENTION_CLASSES = {
582
+ "eager": GPT2Attention,
583
+ "flash_attention_2": GPT2FlashAttention2,
584
+ }
585
+
586
+
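# Illustrative sketch: how the mapping above is typically exercised. The string
# passed as `attn_implementation` selects the attention class via
# config._attn_implementation; this sketch assumes a CUDA device, half precision
# and an installed flash-attn package.
def _example_select_flash_attention():
    import torch
    from transformers import GPT2LMHeadModel

    model = GPT2LMHeadModel.from_pretrained(
        "openai-community/gpt2",
        torch_dtype=torch.float16,
        attn_implementation="flash_attention_2",
    )
    return model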
587
+ class GPT2Block(nn.Module):
588
+ def __init__(self, config, layer_idx=None):
589
+ super().__init__()
590
+ hidden_size = config.hidden_size
591
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
592
+ attention_class = GPT2_ATTENTION_CLASSES[config._attn_implementation]
593
+
594
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
595
+ self.attn = attention_class(config=config, layer_idx=layer_idx)
596
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
597
+
598
+ if config.add_cross_attention:
599
+ self.crossattention = attention_class(config=config, is_cross_attention=True, layer_idx=layer_idx)
600
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
601
+
602
+ self.mlp = GPT2MLP(inner_dim, config)
603
+
604
+ def forward(
605
+ self,
606
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
607
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
608
+ attention_mask: Optional[torch.FloatTensor] = None,
609
+ head_mask: Optional[torch.FloatTensor] = None,
610
+ encoder_hidden_states: Optional[torch.Tensor] = None,
611
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
612
+ use_cache: Optional[bool] = False,
613
+ output_attentions: Optional[bool] = False,
614
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
615
+ residual = hidden_states
616
+ hidden_states = self.ln_1(hidden_states)
617
+ attn_outputs = self.attn(
618
+ hidden_states,
619
+ layer_past=layer_past,
620
+ attention_mask=attention_mask,
621
+ head_mask=head_mask,
622
+ use_cache=use_cache,
623
+ output_attentions=output_attentions,
624
+ )
625
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
626
+ outputs = attn_outputs[1:]
627
+ # residual connection
628
+ hidden_states = attn_output + residual
629
+
630
+ if encoder_hidden_states is not None:
631
+ # add one self-attention block for cross-attention
632
+ if not hasattr(self, "crossattention"):
633
+ raise ValueError(
634
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
635
+ "cross-attention layers by setting `config.add_cross_attention=True`"
636
+ )
637
+ residual = hidden_states
638
+ hidden_states = self.ln_cross_attn(hidden_states)
639
+ cross_attn_outputs = self.crossattention(
640
+ hidden_states,
641
+ attention_mask=attention_mask,
642
+ head_mask=head_mask,
643
+ encoder_hidden_states=encoder_hidden_states,
644
+ encoder_attention_mask=encoder_attention_mask,
645
+ output_attentions=output_attentions,
646
+ )
647
+ attn_output = cross_attn_outputs[0]
648
+ # residual connection
649
+ hidden_states = residual + attn_output
650
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
651
+
652
+ residual = hidden_states
653
+ hidden_states = self.ln_2(hidden_states)
654
+ feed_forward_hidden_states = self.mlp(hidden_states)
655
+ # residual connection
656
+ hidden_states = residual + feed_forward_hidden_states
657
+
658
+ if use_cache:
659
+ outputs = (hidden_states,) + outputs
660
+ else:
661
+ outputs = (hidden_states,) + outputs[1:]
662
+
663
+ return outputs # hidden_states, present, (attentions, cross_attentions)
664
+
665
+
666
+ class GPT2PreTrainedModel(PreTrainedModel):
667
+ """
668
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
669
+ models.
670
+ """
671
+
672
+ config_class = GPT2Config
673
+ load_tf_weights = load_tf_weights_in_gpt2
674
+ base_model_prefix = "transformer"
675
+ is_parallelizable = True
676
+ supports_gradient_checkpointing = True
677
+ _no_split_modules = ["GPT2Block"]
678
+ _skip_keys_device_placement = "past_key_values"
679
+ _supports_flash_attn_2 = True
680
+
681
+ def __init__(self, *inputs, **kwargs):
682
+ super().__init__(*inputs, **kwargs)
683
+
684
+ def _init_weights(self, module):
685
+ """Initialize the weights."""
686
+ if isinstance(module, (nn.Linear, Conv1D)):
687
+ # Slightly different from the TF version which uses truncated_normal for initialization
688
+ # cf https://github.com/pytorch/pytorch/pull/5617
689
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
690
+ if module.bias is not None:
691
+ module.bias.data.zero_()
692
+ elif isinstance(module, nn.Embedding):
693
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
694
+ if module.padding_idx is not None:
695
+ module.weight.data[module.padding_idx].zero_()
696
+ elif isinstance(module, nn.LayerNorm):
697
+ module.bias.data.zero_()
698
+ module.weight.data.fill_(1.0)
699
+
700
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
701
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
702
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
703
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
704
+ #
705
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
706
+ for name, p in module.named_parameters():
707
+ if name == "c_proj.weight":
708
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
709
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
710
+
711
+
712
+ @dataclass
713
+ class GPT2DoubleHeadsModelOutput(ModelOutput):
714
+ """
715
+ Base class for outputs of models predicting if two sentences are consecutive or not.
716
+
717
+ Args:
718
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
719
+ Language modeling loss.
720
+ mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
721
+ Multiple choice classification loss.
722
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
723
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
724
+ mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
725
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
726
+ past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
727
+ Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
728
+ sequence_length, embed_size_per_head)`.
729
+
730
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
731
+ `past_key_values` input) to speed up sequential decoding.
732
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
733
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
734
+ shape `(batch_size, sequence_length, hidden_size)`.
735
+
736
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
737
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
738
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
739
+ sequence_length)`.
740
+
741
+ GPT2Attentions weights after the attention softmax, used to compute the weighted average in the
742
+ self-attention heads.
743
+ """
744
+
745
+ loss: Optional[torch.FloatTensor] = None
746
+ mc_loss: Optional[torch.FloatTensor] = None
747
+ logits: torch.FloatTensor = None
748
+ mc_logits: torch.FloatTensor = None
749
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
750
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
751
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
752
+
753
+
754
+ GPT2_START_DOCSTRING = r"""
755
+
756
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
757
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
758
+ etc.)
759
+
760
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
761
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
762
+ and behavior.
763
+
764
+ Parameters:
765
+ config ([`GPT2Config`]): Model configuration class with all the parameters of the model.
766
+ Initializing with a config file does not load the weights associated with the model, only the
767
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
768
+ """
769
+
770
+ GPT2_INPUTS_DOCSTRING = r"""
771
+ Args:
772
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
773
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
774
+ `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
775
+ sequence tokens in the vocabulary.
776
+
777
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
778
+ `input_ids`.
779
+
780
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
781
+ [`PreTrainedTokenizer.__call__`] for details.
782
+
783
+ [What are input IDs?](../glossary#input-ids)
784
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
785
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
786
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
787
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
788
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
789
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
790
+
791
+ - 1 for tokens that are **not masked**,
792
+ - 0 for tokens that are **masked**.
793
+
794
+ If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
795
+ `past_key_values`. In other words, the `attention_mask` always has to have the length:
796
+ `len(past_key_values) + len(input_ids)`
797
+
798
+ [What are attention masks?](../glossary#attention-mask)
799
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
800
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
801
+ 1]`:
802
+
803
+ - 0 corresponds to a *sentence A* token,
804
+ - 1 corresponds to a *sentence B* token.
805
+
806
+ [What are token type IDs?](../glossary#token-type-ids)
807
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
808
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
809
+ config.max_position_embeddings - 1]`.
810
+
811
+ [What are position IDs?](../glossary#position-ids)
812
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
813
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
814
+
815
+ - 1 indicates the head is **not masked**,
816
+ - 0 indicates the head is **masked**.
817
+
818
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
819
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
820
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
821
+ model's internal embedding lookup matrix.
822
+
823
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
824
+ `past_key_values`).
825
+ use_cache (`bool`, *optional*):
826
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
827
+ `past_key_values`).
828
+ output_attentions (`bool`, *optional*):
829
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
830
+ tensors for more detail.
831
+ output_hidden_states (`bool`, *optional*):
832
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
833
+ more detail.
834
+ return_dict (`bool`, *optional*):
835
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
836
+ """
837
+ PARALLELIZE_DOCSTRING = r"""
838
+ This is an experimental feature and is subject to change at a moment's notice.
839
+
840
+ Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
841
+ it will evenly distribute blocks across all devices.
842
+
843
+ Args:
844
+ device_map (`Dict[int, list]`, optional, defaults to None):
845
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
846
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
847
+ have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
848
+ following number of attention modules:
849
+
850
+ - openai-community/gpt2: 12
851
+ - openai-community/gpt2-medium: 24
852
+ - openai-community/gpt2-large: 36
853
+ - openai-community/gpt2-xl: 48
854
+
855
+ Example:
856
+
857
+ ```python
858
+ # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
859
+ model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl")
860
+ device_map = {
861
+ 0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
862
+ 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
863
+ 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
864
+ 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
865
+ }
866
+ model.parallelize(device_map)
867
+ ```
868
+ """
869
+ DEPARALLELIZE_DOCSTRING = r"""
870
+ Moves the model to cpu from a model parallel state.
871
+
872
+ Example:
873
+
874
+ ```python
875
+ # On a 4 GPU machine with openai-community/gpt2-large:
876
+ model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large")
877
+ device_map = {
878
+ 0: [0, 1, 2, 3, 4, 5, 6, 7],
879
+ 1: [8, 9, 10, 11, 12, 13, 14, 15],
880
+ 2: [16, 17, 18, 19, 20, 21, 22, 23],
881
+ 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
882
+ }
883
+ model.parallelize(device_map) # Splits the model across several devices
884
+ model.deparallelize() # Puts the model back on cpu and cleans up memory by calling torch.cuda.empty_cache()
885
+ ```
886
+ """
887
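The deprecation warnings emitted by `parallelize`/`deparallelize` further down point to `device_map` in `from_pretrained` as the replacement. A minimal sketch, assuming the `accelerate` package is installed:

```python
# `device_map="balanced"` spreads the transformer blocks across all visible GPUs.
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained(
    "openai-community/gpt2-xl",
    device_map="balanced",
)
```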
+
888
+
889
+ @add_start_docstrings(
890
+ "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
891
+ GPT2_START_DOCSTRING,
892
+ )
893
+ class GPT2Model(GPT2PreTrainedModel):
894
+ def __init__(self, config):
895
+ super().__init__(config)
896
+
897
+ self.embed_dim = config.hidden_size
898
+
899
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
900
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
901
+
902
+ self.drop = nn.Dropout(config.embd_pdrop)
903
+ self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
904
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
905
+
906
+ # Model parallel
907
+ self.model_parallel = False
908
+ self.device_map = None
909
+ self.gradient_checkpointing = False
910
+ self._attn_implementation = config._attn_implementation
911
+
912
+ # Initialize weights and apply final processing
913
+ self.post_init()
914
+
915
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
916
+ def parallelize(self, device_map=None):
917
+ # Check validity of device_map
918
+ warnings.warn(
919
+ "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
920
+ " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
921
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
922
+ " ...}",
923
+ FutureWarning,
924
+ )
925
+ self.device_map = (
926
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
927
+ )
928
+ assert_device_map(self.device_map, len(self.h))
929
+ self.model_parallel = True
930
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
931
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
932
+ self.wte = self.wte.to(self.first_device)
933
+ self.wpe = self.wpe.to(self.first_device)
934
+ # Load onto devices
935
+ for k, v in self.device_map.items():
936
+ for block in v:
937
+ cuda_device = "cuda:" + str(k)
938
+ self.h[block] = self.h[block].to(cuda_device)
939
+ # ln_f to last
940
+ self.ln_f = self.ln_f.to(self.last_device)
941
+
942
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
943
+ def deparallelize(self):
944
+ warnings.warn(
945
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
946
+ FutureWarning,
947
+ )
948
+ self.model_parallel = False
949
+ self.device_map = None
950
+ self.first_device = "cpu"
951
+ self.last_device = "cpu"
952
+ self.wte = self.wte.to("cpu")
953
+ self.wpe = self.wpe.to("cpu")
954
+ for index in range(len(self.h)):
955
+ self.h[index] = self.h[index].to("cpu")
956
+ self.ln_f = self.ln_f.to("cpu")
957
+ torch.cuda.empty_cache()
958
+
959
+ def get_input_embeddings(self):
960
+ return self.wte
961
+
962
+ def set_input_embeddings(self, new_embeddings):
963
+ self.wte = new_embeddings
964
+
965
+ def _prune_heads(self, heads_to_prune):
966
+ """
967
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
968
+ """
969
+ for layer, heads in heads_to_prune.items():
970
+ self.h[layer].attn.prune_heads(heads)
971
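A short sketch (not from the source) of the `heads_to_prune` mapping consumed by `_prune_heads` above; it is normally reached through `PreTrainedModel.prune_heads`:

```python
# {layer_index: [head indices to drop in that layer]}
from transformers import GPT2Model

model = GPT2Model.from_pretrained("openai-community/gpt2")
model.prune_heads({0: [0, 2], 5: [1]})
print(model.h[0].attn.num_heads)  # 10 -- two of the original 12 heads removed in layer 0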
+
972
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
973
+ @add_code_sample_docstrings(
974
+ checkpoint=_CHECKPOINT_FOR_DOC,
975
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
976
+ config_class=_CONFIG_FOR_DOC,
977
+ )
978
+ def forward(
979
+ self,
980
+ input_ids: Optional[torch.LongTensor] = None,
981
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
982
+ attention_mask: Optional[torch.FloatTensor] = None,
983
+ token_type_ids: Optional[torch.LongTensor] = None,
984
+ position_ids: Optional[torch.LongTensor] = None,
985
+ head_mask: Optional[torch.FloatTensor] = None,
986
+ inputs_embeds: Optional[torch.FloatTensor] = None,
987
+ encoder_hidden_states: Optional[torch.Tensor] = None,
988
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
989
+ use_cache: Optional[bool] = None,
990
+ output_attentions: Optional[bool] = None,
991
+ output_hidden_states: Optional[bool] = None,
992
+ return_dict: Optional[bool] = None,
993
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
994
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
995
+ output_hidden_states = (
996
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
997
+ )
998
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
999
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1000
+
1001
+ if input_ids is not None and inputs_embeds is not None:
1002
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1003
+ elif input_ids is not None:
1004
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1005
+ input_shape = input_ids.size()
1006
+ input_ids = input_ids.view(-1, input_shape[-1])
1007
+ batch_size = input_ids.shape[0]
1008
+ elif inputs_embeds is not None:
1009
+ input_shape = inputs_embeds.size()[:-1]
1010
+ batch_size = inputs_embeds.shape[0]
1011
+ else:
1012
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1013
+
1014
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1015
+
1016
+ if token_type_ids is not None:
1017
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
1018
+
1019
+ if past_key_values is None:
1020
+ past_length = 0
1021
+ past_key_values = tuple([None] * len(self.h))
1022
+ else:
1023
+ past_length = past_key_values[0][0].size(-2)
1024
+ if position_ids is None:
1025
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
1026
+ position_ids = position_ids.unsqueeze(0)
1027
+
1028
+ # Attention mask.
1029
+ if attention_mask is not None:
1030
+ attention_mask = attention_mask.view(batch_size, -1)
1031
+ if self._attn_implementation == "flash_attention_2":
1032
+ attention_mask = attention_mask if 0 in attention_mask else None
1033
+ else:
1034
+ # We create a 3D attention mask from a 2D tensor mask.
1035
+ # Sizes are [batch_size, 1, 1, to_seq_length]
1036
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
1037
+ # this attention mask is more simple than the triangular masking of causal attention
1038
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
1039
+ attention_mask = attention_mask[:, None, None, :]
1040
+
1041
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1042
+ # masked positions, this operation will create a tensor which is 0.0 for
1043
+ # positions we want to attend and the dtype's smallest value for masked positions.
1044
+ # Since we are adding it to the raw scores before the softmax, this is
1045
+ # effectively the same as removing these entirely.
1046
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
1047
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
1048
+
1049
+ # If a 2D or 3D attention mask is provided for the cross-attention
1050
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1051
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
1052
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1053
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1054
+ if encoder_attention_mask is None:
1055
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1056
+ if self._attn_implementation != "flash_attention_2":
1057
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1058
+ else:
1059
+ encoder_attention_mask = None
1060
+
1061
+ # Prepare head mask if needed
1062
+ # 1.0 in head_mask indicate we keep the head
1063
+ # attention_probs has shape bsz x n_heads x N x N
1064
+ # head_mask has shape n_layer x batch x n_heads x N x N
1065
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
1066
+
1067
+ if inputs_embeds is None:
1068
+ inputs_embeds = self.wte(input_ids)
1069
+ position_embeds = self.wpe(position_ids)
1070
+ hidden_states = inputs_embeds + position_embeds
1071
+
1072
+ if token_type_ids is not None:
1073
+ token_type_embeds = self.wte(token_type_ids)
1074
+ hidden_states = hidden_states + token_type_embeds
1075
+
1076
+ hidden_states = self.drop(hidden_states)
1077
+
1078
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
1079
+
1080
+ if self.gradient_checkpointing and self.training:
1081
+ if use_cache:
1082
+ logger.warning_once(
1083
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1084
+ )
1085
+ use_cache = False
1086
+
1087
+ presents = () if use_cache else None
1088
+ all_self_attentions = () if output_attentions else None
1089
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
1090
+ all_hidden_states = () if output_hidden_states else None
1091
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
1092
+ # Model parallel
1093
+ if self.model_parallel:
1094
+ torch.cuda.set_device(hidden_states.device)
1095
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
1096
+ if layer_past is not None:
1097
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
1098
+ # Ensure that attention_mask is always on the same device as hidden_states
1099
+ if attention_mask is not None:
1100
+ attention_mask = attention_mask.to(hidden_states.device)
1101
+ if isinstance(head_mask, torch.Tensor):
1102
+ head_mask = head_mask.to(hidden_states.device)
1103
+ if output_hidden_states:
1104
+ all_hidden_states = all_hidden_states + (hidden_states,)
1105
+
1106
+ if self.gradient_checkpointing and self.training:
1107
+ outputs = self._gradient_checkpointing_func(
1108
+ block.__call__,
1109
+ hidden_states,
1110
+ None,
1111
+ attention_mask,
1112
+ head_mask[i],
1113
+ encoder_hidden_states,
1114
+ encoder_attention_mask,
1115
+ use_cache,
1116
+ output_attentions,
1117
+ )
1118
+ else:
1119
+ outputs = block(
1120
+ hidden_states,
1121
+ layer_past=layer_past,
1122
+ attention_mask=attention_mask,
1123
+ head_mask=head_mask[i],
1124
+ encoder_hidden_states=encoder_hidden_states,
1125
+ encoder_attention_mask=encoder_attention_mask,
1126
+ use_cache=use_cache,
1127
+ output_attentions=output_attentions,
1128
+ )
1129
+
1130
+ hidden_states = outputs[0]
1131
+ if use_cache is True:
1132
+ presents = presents + (outputs[1],)
1133
+
1134
+ if output_attentions:
1135
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
1136
+ if self.config.add_cross_attention:
1137
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
1138
+
1139
+ # Model Parallel: If it's the last layer for that device, put things on the next device
1140
+ if self.model_parallel:
1141
+ for k, v in self.device_map.items():
1142
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
1143
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
1144
+
1145
+ hidden_states = self.ln_f(hidden_states)
1146
+
1147
+ hidden_states = hidden_states.view(output_shape)
1148
+ # Add last hidden state
1149
+ if output_hidden_states:
1150
+ all_hidden_states = all_hidden_states + (hidden_states,)
1151
+
1152
+ if not return_dict:
1153
+ return tuple(
1154
+ v
1155
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
1156
+ if v is not None
1157
+ )
1158
+
1159
+ return BaseModelOutputWithPastAndCrossAttentions(
1160
+ last_hidden_state=hidden_states,
1161
+ past_key_values=presents,
1162
+ hidden_states=all_hidden_states,
1163
+ attentions=all_self_attentions,
1164
+ cross_attentions=all_cross_attentions,
1165
+ )
1166
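A brief usage sketch for the bare `GPT2Model` defined above: it returns hidden states rather than logits.

```python
import torch
from transformers import AutoTokenizer, GPT2Model

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2Model.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768) for the base checkpoint
print(len(outputs.hidden_states))       # embeddings + one entry per block (13 for gpt2)
```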
+
1167
+
1168
+ @add_start_docstrings(
1169
+ """
1170
+ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
1171
+ embeddings).
1172
+ """,
1173
+ GPT2_START_DOCSTRING,
1174
+ )
1175
+ class GPT2LMHeadModel(GPT2PreTrainedModel):
1176
+ _tied_weights_keys = ["lm_head.weight"]
1177
+
1178
+ def __init__(self, config):
1179
+ super().__init__(config)
1180
+ self.transformer = GPT2Model(config)
1181
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1182
+
1183
+ # Model parallel
1184
+ self.model_parallel = False
1185
+ self.device_map = None
1186
+
1187
+ # Initialize weights and apply final processing
1188
+ self.post_init()
1189
+
1190
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1191
+ def parallelize(self, device_map=None):
1192
+ warnings.warn(
1193
+ "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
1194
+ " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
1195
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
1196
+ " 0, 'transformer.h.1': 1, ...}",
1197
+ FutureWarning,
1198
+ )
1199
+ self.device_map = (
1200
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1201
+ if device_map is None
1202
+ else device_map
1203
+ )
1204
+ assert_device_map(self.device_map, len(self.transformer.h))
1205
+ self.transformer.parallelize(self.device_map)
1206
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1207
+ self.model_parallel = True
1208
+
1209
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1210
+ def deparallelize(self):
1211
+ warnings.warn(
1212
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
1213
+ FutureWarning,
1214
+ )
1215
+ self.transformer.deparallelize()
1216
+ self.transformer = self.transformer.to("cpu")
1217
+ self.lm_head = self.lm_head.to("cpu")
1218
+ self.model_parallel = False
1219
+ torch.cuda.empty_cache()
1220
+
1221
+ def get_output_embeddings(self):
1222
+ return self.lm_head
1223
+
1224
+ def set_output_embeddings(self, new_embeddings):
1225
+ self.lm_head = new_embeddings
1226
+
1227
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
1228
+ token_type_ids = kwargs.get("token_type_ids", None)
1229
+ # Omit tokens covered by past_key_values
1230
+ if past_key_values:
1231
+ past_length = past_key_values[0][0].shape[2]
1232
+
1233
+ # Some generation methods already pass only the last input ID
1234
+ if input_ids.shape[1] > past_length:
1235
+ remove_prefix_length = past_length
1236
+ else:
1237
+ # Default to old behavior: keep only final ID
1238
+ remove_prefix_length = input_ids.shape[1] - 1
1239
+
1240
+ input_ids = input_ids[:, remove_prefix_length:]
1241
+ if token_type_ids is not None:
1242
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
1243
+
1244
+ attention_mask = kwargs.get("attention_mask", None)
1245
+ position_ids = kwargs.get("position_ids", None)
1246
+
1247
+ if attention_mask is not None and position_ids is None:
1248
+ # create position_ids on the fly for batch generation
1249
+ position_ids = attention_mask.long().cumsum(-1) - 1
1250
+ position_ids.masked_fill_(attention_mask == 0, 1)
1251
+ if past_key_values:
1252
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1253
+ else:
1254
+ position_ids = None
1255
+
1256
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1257
+ if inputs_embeds is not None and past_key_values is None:
1258
+ model_inputs = {"inputs_embeds": inputs_embeds}
1259
+ else:
1260
+ model_inputs = {"input_ids": input_ids}
1261
+
1262
+ model_inputs.update(
1263
+ {
1264
+ "past_key_values": past_key_values,
1265
+ "use_cache": kwargs.get("use_cache"),
1266
+ "position_ids": position_ids,
1267
+ "attention_mask": attention_mask,
1268
+ "token_type_ids": token_type_ids,
1269
+ }
1270
+ )
1271
+
1272
+ return model_inputs
1273
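The cumulative-sum trick used above to build `position_ids` for left-padded batch generation can be checked in isolation; a small sketch:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],   # left-padded row
                               [1, 1, 1, 1, 1]])  # full row
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])  -> real tokens keep positions 0..n regardless of padding
```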
+
1274
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1275
+ @add_code_sample_docstrings(
1276
+ checkpoint=_CHECKPOINT_FOR_DOC,
1277
+ output_type=CausalLMOutputWithCrossAttentions,
1278
+ config_class=_CONFIG_FOR_DOC,
1279
+ )
1280
+ def forward(
1281
+ self,
1282
+ input_ids: Optional[torch.LongTensor] = None,
1283
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1284
+ attention_mask: Optional[torch.FloatTensor] = None,
1285
+ token_type_ids: Optional[torch.LongTensor] = None,
1286
+ position_ids: Optional[torch.LongTensor] = None,
1287
+ head_mask: Optional[torch.FloatTensor] = None,
1288
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1289
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1290
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1291
+ labels: Optional[torch.LongTensor] = None,
1292
+ use_cache: Optional[bool] = None,
1293
+ output_attentions: Optional[bool] = None,
1294
+ output_hidden_states: Optional[bool] = None,
1295
+ return_dict: Optional[bool] = None,
1296
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1297
+ r"""
1298
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1299
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1300
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
1301
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
1302
+ """
1303
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1304
+
1305
+ transformer_outputs = self.transformer(
1306
+ input_ids,
1307
+ past_key_values=past_key_values,
1308
+ attention_mask=attention_mask,
1309
+ token_type_ids=token_type_ids,
1310
+ position_ids=position_ids,
1311
+ head_mask=head_mask,
1312
+ inputs_embeds=inputs_embeds,
1313
+ encoder_hidden_states=encoder_hidden_states,
1314
+ encoder_attention_mask=encoder_attention_mask,
1315
+ use_cache=use_cache,
1316
+ output_attentions=output_attentions,
1317
+ output_hidden_states=output_hidden_states,
1318
+ return_dict=return_dict,
1319
+ )
1320
+ hidden_states = transformer_outputs[0]
1321
+
1322
+ # Set device for model parallelism
1323
+ if self.model_parallel:
1324
+ torch.cuda.set_device(self.transformer.first_device)
1325
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1326
+
1327
+ lm_logits = self.lm_head(hidden_states)
1328
+
1329
+ loss = None
1330
+ if labels is not None:
1331
+ # move labels to correct device to enable model parallelism
1332
+ labels = labels.to(lm_logits.device)
1333
+ # Shift so that tokens < n predict n
1334
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1335
+ shift_labels = labels[..., 1:].contiguous()
1336
+ # Flatten the tokens
1337
+ loss_fct = CrossEntropyLoss()
1338
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1339
+
1340
+ if not return_dict:
1341
+ output = (lm_logits,) + transformer_outputs[1:]
1342
+ return ((loss,) + output) if loss is not None else output
1343
+
1344
+ return CausalLMOutputWithCrossAttentions(
1345
+ loss=loss,
1346
+ logits=lm_logits,
1347
+ past_key_values=transformer_outputs.past_key_values,
1348
+ hidden_states=transformer_outputs.hidden_states,
1349
+ attentions=transformer_outputs.attentions,
1350
+ cross_attentions=transformer_outputs.cross_attentions,
1351
+ )
1352
+
1353
+ @staticmethod
1354
+ def _reorder_cache(
1355
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1356
+ ) -> Tuple[Tuple[torch.Tensor]]:
1357
+ """
1358
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1359
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1360
+ beam_idx at every generation step.
1361
+ """
1362
+ return tuple(
1363
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1364
+ for layer_past in past_key_values
1365
+ )
1366
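Because the labels are shifted inside `forward` (see the `shift_logits` / `shift_labels` lines above), a language-modeling loss can be obtained by passing the inputs themselves as labels. A hedged sketch:

```python
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])
print(outputs.loss)             # cross-entropy over the shifted tokens
print(torch.exp(outputs.loss))  # perplexity of the sequence
```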
+
1367
+
1368
+ @add_start_docstrings(
1369
+ """
1370
+ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
1371
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
1372
+ input embeddings, while the classification head takes as input the hidden state at a specified classification token index in the
1373
+ input sequence.
1374
+ """,
1375
+ GPT2_START_DOCSTRING,
1376
+ )
1377
+ class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
1378
+ _tied_weights_keys = ["lm_head.weight"]
1379
+
1380
+ def __init__(self, config):
1381
+ super().__init__(config)
1382
+ config.num_labels = 1
1383
+ self.transformer = GPT2Model(config)
1384
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1385
+ self.multiple_choice_head = SequenceSummary(config)
1386
+
1387
+ # Model parallel
1388
+ self.model_parallel = False
1389
+ self.device_map = None
1390
+
1391
+ # Initialize weights and apply final processing
1392
+ self.post_init()
1393
+
1394
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1395
+ def parallelize(self, device_map=None):
1396
+ warnings.warn(
1397
+ "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should"
1398
+ " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your"
1399
+ " own `device_map` but it needs to be a dictionary module_name to device, so for instance"
1400
+ " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}",
1401
+ FutureWarning,
1402
+ )
1403
+ self.device_map = (
1404
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1405
+ if device_map is None
1406
+ else device_map
1407
+ )
1408
+ assert_device_map(self.device_map, len(self.transformer.h))
1409
+ self.transformer.parallelize(self.device_map)
1410
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1411
+ self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device)
1412
+ self.model_parallel = True
1413
+
1414
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1415
+ def deparallelize(self):
1416
+ warnings.warn(
1417
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
1418
+ FutureWarning,
1419
+ )
1420
+ self.transformer.deparallelize()
1421
+ self.transformer = self.transformer.to("cpu")
1422
+ self.lm_head = self.lm_head.to("cpu")
1423
+ self.multiple_choice_head = self.multiple_choice_head.to("cpu")
1424
+ self.model_parallel = False
1425
+ torch.cuda.empty_cache()
1426
+
1427
+ def get_output_embeddings(self):
1428
+ return self.lm_head
1429
+
1430
+ def set_output_embeddings(self, new_embeddings):
1431
+ self.lm_head = new_embeddings
1432
+
1433
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
1434
+ token_type_ids = kwargs.get("token_type_ids", None)
1435
+ # Omit tokens covered by past_key_values
1436
+ if past_key_values:
1437
+ past_length = past_key_values[0][0].shape[2]
1438
+
1439
+ # Some generation methods already pass only the last input ID
1440
+ if input_ids.shape[1] > past_length:
1441
+ remove_prefix_length = past_length
1442
+ else:
1443
+ # Default to old behavior: keep only final ID
1444
+ remove_prefix_length = input_ids.shape[1] - 1
1445
+
1446
+ input_ids = input_ids[:, remove_prefix_length:]
1447
+ if token_type_ids is not None:
1448
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
1449
+
1450
+ attention_mask = kwargs.get("attention_mask", None)
1451
+ position_ids = kwargs.get("position_ids", None)
1452
+
1453
+ if attention_mask is not None and position_ids is None:
1454
+ # create position_ids on the fly for batch generation
1455
+ position_ids = attention_mask.long().cumsum(-1) - 1
1456
+ position_ids.masked_fill_(attention_mask == 0, 1)
1457
+ if past_key_values:
1458
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1459
+ else:
1460
+ position_ids = None
1461
+
1462
+ return {
1463
+ "input_ids": input_ids,
1464
+ "past_key_values": past_key_values,
1465
+ "use_cache": kwargs.get("use_cache"),
1466
+ "position_ids": position_ids,
1467
+ "attention_mask": attention_mask,
1468
+ "token_type_ids": token_type_ids,
1469
+ }
1470
+
1471
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1472
+ @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
1473
+ def forward(
1474
+ self,
1475
+ input_ids: Optional[torch.LongTensor] = None,
1476
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1477
+ attention_mask: Optional[torch.FloatTensor] = None,
1478
+ token_type_ids: Optional[torch.LongTensor] = None,
1479
+ position_ids: Optional[torch.LongTensor] = None,
1480
+ head_mask: Optional[torch.FloatTensor] = None,
1481
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1482
+ mc_token_ids: Optional[torch.LongTensor] = None,
1483
+ labels: Optional[torch.LongTensor] = None,
1484
+ mc_labels: Optional[torch.LongTensor] = None,
1485
+ use_cache: Optional[bool] = None,
1486
+ output_attentions: Optional[bool] = None,
1487
+ output_hidden_states: Optional[bool] = None,
1488
+ return_dict: Optional[bool] = None,
1489
+ **kwargs,
1490
+ ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]:
1491
+ r"""
1492
+ mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
1493
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1494
+ 1]`.
1495
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1496
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1497
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
1498
+ `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`
1499
+ mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
1500
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1501
+ where *num_choices* is the size of the second dimension of the input tensors (see *input_ids* above).
1502
+
1503
+ Return:
1504
+
1505
+ Example:
1506
+
1507
+ ```python
1508
+ >>> import torch
1509
+ >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel
1510
+
1511
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
1512
+ >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2")
1513
+
1514
+ >>> # Add a [CLS] to the vocabulary (we should train it also!)
1515
+ >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
1516
+ >>> # Update the model embeddings with the new vocabulary size
1517
+ >>> embedding_layer = model.resize_token_embeddings(len(tokenizer))
1518
+
1519
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
1520
+ >>> encoded_choices = [tokenizer.encode(s) for s in choices]
1521
+ >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
1522
+
1523
+ >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
1524
+ >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
1525
+
1526
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
1527
+ >>> lm_logits = outputs.logits
1528
+ >>> mc_logits = outputs.mc_logits
1529
+ ```"""
1530
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1531
+
1532
+ transformer_outputs = self.transformer(
1533
+ input_ids,
1534
+ past_key_values=past_key_values,
1535
+ attention_mask=attention_mask,
1536
+ token_type_ids=token_type_ids,
1537
+ position_ids=position_ids,
1538
+ head_mask=head_mask,
1539
+ inputs_embeds=inputs_embeds,
1540
+ use_cache=use_cache,
1541
+ output_attentions=output_attentions,
1542
+ output_hidden_states=output_hidden_states,
1543
+ return_dict=return_dict,
1544
+ )
1545
+
1546
+ hidden_states = transformer_outputs[0]
1547
+
1548
+ # Set device for model parallelism
1549
+ if self.model_parallel:
1550
+ torch.cuda.set_device(self.transformer.first_device)
1551
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1552
+
1553
+ lm_logits = self.lm_head(hidden_states)
1554
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
1555
+
1556
+ mc_loss = None
1557
+ if mc_labels is not None:
1558
+ loss_fct = CrossEntropyLoss()
1559
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
1560
+ lm_loss = None
1561
+ if labels is not None:
1562
+ labels = labels.to(lm_logits.device)
1563
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1564
+ shift_labels = labels[..., 1:].contiguous()
1565
+ loss_fct = CrossEntropyLoss()
1566
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1567
+
1568
+ if not return_dict:
1569
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
1570
+ if mc_loss is not None:
1571
+ output = (mc_loss,) + output
1572
+ return ((lm_loss,) + output) if lm_loss is not None else output
1573
+
1574
+ return GPT2DoubleHeadsModelOutput(
1575
+ loss=lm_loss,
1576
+ mc_loss=mc_loss,
1577
+ logits=lm_logits,
1578
+ mc_logits=mc_logits,
1579
+ past_key_values=transformer_outputs.past_key_values,
1580
+ hidden_states=transformer_outputs.hidden_states,
1581
+ attentions=transformer_outputs.attentions,
1582
+ )
1583
+
1584
+ @staticmethod
1585
+ def _reorder_cache(
1586
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1587
+ ) -> Tuple[Tuple[torch.Tensor]]:
1588
+ """
1589
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1590
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1591
+ beam_idx at every generation step.
1592
+ """
1593
+ return tuple(
1594
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1595
+ for layer_past in past_key_values
1596
+ )
1597
+
1598
+
1599
+ @add_start_docstrings(
1600
+ """
1601
+ The GPT2 Model transformer with a sequence classification head on top (linear layer).
1602
+
1603
+ [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1604
+ (e.g. GPT-1) do.
1605
+
1606
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1607
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1608
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1609
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1610
+ each row of the batch).
1611
+ """,
1612
+ GPT2_START_DOCSTRING,
1613
+ )
1614
+ class GPT2ForSequenceClassification(GPT2PreTrainedModel):
1615
+ def __init__(self, config):
1616
+ super().__init__(config)
1617
+ self.num_labels = config.num_labels
1618
+ self.transformer = GPT2Model(config)
1619
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1620
+
1621
+ # Model parallel
1622
+ self.model_parallel = False
1623
+ self.device_map = None
1624
+
1625
+ # Initialize weights and apply final processing
1626
+ self.post_init()
1627
+
1628
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1629
+ @add_code_sample_docstrings(
1630
+ checkpoint="microsoft/DialogRPT-updown",
1631
+ output_type=SequenceClassifierOutputWithPast,
1632
+ config_class=_CONFIG_FOR_DOC,
1633
+ )
1634
+ def forward(
1635
+ self,
1636
+ input_ids: Optional[torch.LongTensor] = None,
1637
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1638
+ attention_mask: Optional[torch.FloatTensor] = None,
1639
+ token_type_ids: Optional[torch.LongTensor] = None,
1640
+ position_ids: Optional[torch.LongTensor] = None,
1641
+ head_mask: Optional[torch.FloatTensor] = None,
1642
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1643
+ labels: Optional[torch.LongTensor] = None,
1644
+ use_cache: Optional[bool] = None,
1645
+ output_attentions: Optional[bool] = None,
1646
+ output_hidden_states: Optional[bool] = None,
1647
+ return_dict: Optional[bool] = None,
1648
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1649
+ r"""
1650
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1651
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1652
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1653
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1654
+ """
1655
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1656
+
1657
+ transformer_outputs = self.transformer(
1658
+ input_ids,
1659
+ past_key_values=past_key_values,
1660
+ attention_mask=attention_mask,
1661
+ token_type_ids=token_type_ids,
1662
+ position_ids=position_ids,
1663
+ head_mask=head_mask,
1664
+ inputs_embeds=inputs_embeds,
1665
+ use_cache=use_cache,
1666
+ output_attentions=output_attentions,
1667
+ output_hidden_states=output_hidden_states,
1668
+ return_dict=return_dict,
1669
+ )
1670
+ hidden_states = transformer_outputs[0]
1671
+ logits = self.score(hidden_states)
1672
+
1673
+ if input_ids is not None:
1674
+ batch_size, sequence_length = input_ids.shape[:2]
1675
+ else:
1676
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1677
+
1678
+ assert (
1679
+ self.config.pad_token_id is not None or batch_size == 1
1680
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1681
+ if self.config.pad_token_id is None:
1682
+ sequence_lengths = -1
1683
+ else:
1684
+ if input_ids is not None:
1685
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1686
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1687
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1688
+ sequence_lengths = sequence_lengths.to(logits.device)
1689
+ else:
1690
+ sequence_lengths = -1
1691
+ logger.warning(
1692
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1693
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1694
+ )
1695
+
1696
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1697
+
1698
+ loss = None
1699
+ if labels is not None:
1700
+ if self.config.problem_type is None:
1701
+ if self.num_labels == 1:
1702
+ self.config.problem_type = "regression"
1703
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1704
+ self.config.problem_type = "single_label_classification"
1705
+ else:
1706
+ self.config.problem_type = "multi_label_classification"
1707
+
1708
+ if self.config.problem_type == "regression":
1709
+ loss_fct = MSELoss()
1710
+ if self.num_labels == 1:
1711
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1712
+ else:
1713
+ loss = loss_fct(pooled_logits, labels)
1714
+ elif self.config.problem_type == "single_label_classification":
1715
+ loss_fct = CrossEntropyLoss()
1716
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1717
+ elif self.config.problem_type == "multi_label_classification":
1718
+ loss_fct = BCEWithLogitsLoss()
1719
+ loss = loss_fct(pooled_logits, labels)
1720
+ if not return_dict:
1721
+ output = (pooled_logits,) + transformer_outputs[1:]
1722
+ return ((loss,) + output) if loss is not None else output
1723
+
1724
+ return SequenceClassifierOutputWithPast(
1725
+ loss=loss,
1726
+ logits=pooled_logits,
1727
+ past_key_values=transformer_outputs.past_key_values,
1728
+ hidden_states=transformer_outputs.hidden_states,
1729
+ attentions=transformer_outputs.attentions,
1730
+ )
1731
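The last-token pooling index computed in the classification head above can be reproduced on its own; a minimal sketch, assuming right padding with a known `pad_token_id`:

```python
import torch

pad_token_id = 50256
input_ids = torch.tensor([[15496, 11, 995, 50256, 50256],   # padded row
                          [40, 1842, 2330, 13, 318]])       # unpadded row
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]   # ONNX-friendly wrap-around
print(sequence_lengths)  # tensor([2, 4]) -> index of the last non-padding token per row
```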
+
1732
+
1733
+ @add_start_docstrings(
1734
+ """
1735
+ GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1736
+ Named-Entity-Recognition (NER) tasks.
1737
+ """,
1738
+ GPT2_START_DOCSTRING,
1739
+ )
1740
+ class GPT2ForTokenClassification(GPT2PreTrainedModel):
1741
+ def __init__(self, config):
1742
+ super().__init__(config)
1743
+ self.num_labels = config.num_labels
1744
+
1745
+ self.transformer = GPT2Model(config)
1746
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1747
+ classifier_dropout = config.classifier_dropout
1748
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1749
+ classifier_dropout = config.hidden_dropout
1750
+ else:
1751
+ classifier_dropout = 0.1
1752
+ self.dropout = nn.Dropout(classifier_dropout)
1753
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1754
+
1755
+ # Model parallel
1756
+ self.model_parallel = False
1757
+ self.device_map = None
1758
+
1759
+ # Initialize weights and apply final processing
1760
+ self.post_init()
1761
+
1762
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1763
+ # fmt: off
1764
+ @add_code_sample_docstrings(
1765
+ checkpoint="brad1141/gpt2-finetuned-comp2",
1766
+ output_type=TokenClassifierOutput,
1767
+ config_class=_CONFIG_FOR_DOC,
1768
+ expected_loss=0.25,
1769
+ expected_output=[
1770
+ "Lead",
1771
+ "Lead",
1772
+ "Lead",
1773
+ "Position",
1774
+ "Lead",
1775
+ "Lead",
1776
+ "Lead",
1777
+ "Lead",
1778
+ "Lead",
1779
+ "Lead",
1780
+ "Lead",
1781
+ "Lead",
1782
+ ],
1783
+ )
1784
+ # fmt: on
1785
+ def forward(
1786
+ self,
1787
+ input_ids: Optional[torch.LongTensor] = None,
1788
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1789
+ attention_mask: Optional[torch.FloatTensor] = None,
1790
+ token_type_ids: Optional[torch.LongTensor] = None,
1791
+ position_ids: Optional[torch.LongTensor] = None,
1792
+ head_mask: Optional[torch.FloatTensor] = None,
1793
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1794
+ labels: Optional[torch.LongTensor] = None,
1795
+ use_cache: Optional[bool] = None,
1796
+ output_attentions: Optional[bool] = None,
1797
+ output_hidden_states: Optional[bool] = None,
1798
+ return_dict: Optional[bool] = None,
1799
+ ) -> Union[Tuple, TokenClassifierOutput]:
1800
+ r"""
1801
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1802
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1803
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1804
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1805
+ """
1806
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1807
+
1808
+ transformer_outputs = self.transformer(
1809
+ input_ids,
1810
+ past_key_values=past_key_values,
1811
+ attention_mask=attention_mask,
1812
+ token_type_ids=token_type_ids,
1813
+ position_ids=position_ids,
1814
+ head_mask=head_mask,
1815
+ inputs_embeds=inputs_embeds,
1816
+ use_cache=use_cache,
1817
+ output_attentions=output_attentions,
1818
+ output_hidden_states=output_hidden_states,
1819
+ return_dict=return_dict,
1820
+ )
1821
+
1822
+ hidden_states = transformer_outputs[0]
1823
+ hidden_states = self.dropout(hidden_states)
1824
+ logits = self.classifier(hidden_states)
1825
+
1826
+ loss = None
1827
+ if labels is not None:
1828
+ labels = labels.to(logits.device)
1829
+ loss_fct = CrossEntropyLoss()
1830
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1831
+
1832
+ if not return_dict:
1833
+ output = (logits,) + transformer_outputs[2:]
1834
+ return ((loss,) + output) if loss is not None else output
1835
+
1836
+ return TokenClassifierOutput(
1837
+ loss=loss,
1838
+ logits=logits,
1839
+ hidden_states=transformer_outputs.hidden_states,
1840
+ attentions=transformer_outputs.attentions,
1841
+ )
1842
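A hedged usage sketch for `GPT2ForTokenClassification` (the classifier head here is freshly initialized, so the predictions are only illustrative):

```python
import torch
from transformers import AutoTokenizer, GPT2ForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2ForTokenClassification.from_pretrained("openai-community/gpt2", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, sequence_length, num_labels)
predictions = logits.argmax(-1)      # one label id per input token
```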
+
1843
+
1844
+ @add_start_docstrings(
1845
+ """
1846
+ The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like
1847
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1848
+ """,
1849
+ GPT2_START_DOCSTRING,
1850
+ )
1851
+ class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
1852
+ def __init__(self, config):
1853
+ super().__init__(config)
1854
+ self.num_labels = config.num_labels
1855
+ self.transformer = GPT2Model(config)
1856
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1857
+
1858
+ # Model parallel
1859
+ self.model_parallel = False
1860
+ self.device_map = None
1861
+
1862
+ # Initialize weights and apply final processing
1863
+ self.post_init()
1864
+
1865
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1866
+ @add_code_sample_docstrings(
1867
+ checkpoint=_CHECKPOINT_FOR_DOC,
1868
+ output_type=QuestionAnsweringModelOutput,
1869
+ config_class=_CONFIG_FOR_DOC,
1870
+ real_checkpoint=_CHECKPOINT_FOR_DOC,
1871
+ )
1872
+ def forward(
1873
+ self,
1874
+ input_ids: Optional[torch.LongTensor] = None,
1875
+ attention_mask: Optional[torch.FloatTensor] = None,
1876
+ token_type_ids: Optional[torch.LongTensor] = None,
1877
+ position_ids: Optional[torch.LongTensor] = None,
1878
+ head_mask: Optional[torch.FloatTensor] = None,
1879
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1880
+ start_positions: Optional[torch.LongTensor] = None,
1881
+ end_positions: Optional[torch.LongTensor] = None,
1882
+ output_attentions: Optional[bool] = None,
1883
+ output_hidden_states: Optional[bool] = None,
1884
+ return_dict: Optional[bool] = None,
1885
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1886
+ r"""
1887
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1888
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1889
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1890
+ are not taken into account for computing the loss.
1891
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1892
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1893
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1894
+ are not taken into account for computing the loss.
1895
+ """
1896
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1897
+
1898
+ outputs = self.transformer(
1899
+ input_ids,
1900
+ attention_mask=attention_mask,
1901
+ token_type_ids=token_type_ids,
1902
+ position_ids=position_ids,
1903
+ head_mask=head_mask,
1904
+ inputs_embeds=inputs_embeds,
1905
+ output_attentions=output_attentions,
1906
+ output_hidden_states=output_hidden_states,
1907
+ return_dict=return_dict,
1908
+ )
1909
+
1910
+ sequence_output = outputs[0]
1911
+
1912
+ logits = self.qa_outputs(sequence_output)
1913
+ start_logits, end_logits = logits.split(1, dim=-1)
1914
+ start_logits = start_logits.squeeze(-1).contiguous()
1915
+ end_logits = end_logits.squeeze(-1).contiguous()
1916
+
1917
+ total_loss = None
1918
+ if start_positions is not None and end_positions is not None:
1919
+ # If we are on multi-GPU, the positions may carry an extra dimension; squeeze it
1920
+ if len(start_positions.size()) > 1:
1921
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1922
+ if len(end_positions.size()) > 1:
1923
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1924
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1925
+ ignored_index = start_logits.size(1)
1926
+ start_positions = start_positions.clamp(0, ignored_index)
1927
+ end_positions = end_positions.clamp(0, ignored_index)
1928
+
1929
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1930
+ start_loss = loss_fct(start_logits, start_positions)
1931
+ end_loss = loss_fct(end_logits, end_positions)
1932
+ total_loss = (start_loss + end_loss) / 2
1933
+
1934
+ if not return_dict:
1935
+ output = (start_logits, end_logits) + outputs[2:]
1936
+ return ((total_loss,) + output) if total_loss is not None else output
1937
+
1938
+ return QuestionAnsweringModelOutput(
1939
+ loss=total_loss,
1940
+ start_logits=start_logits,
1941
+ end_logits=end_logits,
1942
+ hidden_states=outputs.hidden_states,
1943
+ attentions=outputs.attentions,
1944
+ )
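A sketch of turning the start/end logits produced above into an answer span (the base checkpoint has no trained QA head, so this only illustrates the mechanics):

```python
import torch
from transformers import AutoTokenizer, GPT2ForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2ForQuestionAnswering.from_pretrained("openai-community/gpt2")

question, context = "Where do I live?", "My name is Tim and I live in Paris."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
```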
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/modeling_tf_gpt2.py ADDED
@@ -0,0 +1,1238 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 OpenAI GPT-2 model."""
17
+
18
+ from __future__ import annotations
19
+
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutputWithPastAndCrossAttentions,
29
+ TFCausalLMOutputWithCrossAttentions,
30
+ TFSequenceClassifierOutputWithPast,
31
+ )
32
+ from ...modeling_tf_utils import (
33
+ TFCausalLanguageModelingLoss,
34
+ TFConv1D,
35
+ TFModelInputType,
36
+ TFPreTrainedModel,
37
+ TFSequenceClassificationLoss,
38
+ TFSequenceSummary,
39
+ get_initializer,
40
+ keras,
41
+ keras_serializable,
42
+ unpack_inputs,
43
+ )
44
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
45
+ from ...utils import (
46
+ ModelOutput,
47
+ add_code_sample_docstrings,
48
+ add_start_docstrings,
49
+ add_start_docstrings_to_model_forward,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from .configuration_gpt2 import GPT2Config
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ _CHECKPOINT_FOR_DOC = "openai-community/gpt2"
59
+ _CONFIG_FOR_DOC = "GPT2Config"
60
+
61
+
62
+ from ..deprecated._archive_maps import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
63
+
64
+
65
+ class TFAttention(keras.layers.Layer):
66
+ def __init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs):
67
+ super().__init__(**kwargs)
68
+
69
+ n_state = nx # in Attention: n_state=768 (nx=n_embd)
70
+ # [switch nx => n_state from Block to Attention to keep identical to TF implementation]
71
+ assert n_state % config.n_head == 0
72
+ self.n_head = config.n_head
73
+ self.split_size = n_state
74
+ self.scale = scale
75
+ self.output_attentions = config.output_attentions
76
+
77
+ self.is_cross_attention = is_cross_attention
78
+
79
+ if self.is_cross_attention:
80
+ self.c_attn = TFConv1D(n_state * 2, nx, initializer_range=config.initializer_range, name="c_attn")
81
+ self.q_attn = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="q_attn")
82
+ else:
83
+ self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
84
+
85
+ self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
86
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
87
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
88
+ self.pruned_heads = set()
89
+ self.embed_dim = n_state
90
+
91
+ def prune_heads(self, heads):
92
+ pass
93
+
94
+ @staticmethod
95
+ def causal_attention_mask(nd, ns, dtype):
96
+ """
97
+ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]),
98
+ -1, ns-nd), but doesn't produce garbage on TPUs.
99
+ """
100
+ i = tf.range(nd)[:, None]
101
+ j = tf.range(ns)
102
+ m = i >= j - ns + nd
103
+ return tf.cast(m, dtype)
104
+
105
+ def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):
106
+ # q, k, v have shape [batch, heads, sequence, features]
107
+ w = tf.matmul(q, k, transpose_b=True)
108
+ if self.scale:
109
+ dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores
110
+ w = w / tf.math.sqrt(dk)
111
+
112
+ if not self.is_cross_attention:
113
+ # if only "normal" attention layer implements causal mask
114
+
115
+ # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
116
+ _, _, nd, ns = shape_list(w)
117
+ b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
118
+ b = tf.reshape(b, [1, 1, nd, ns])
119
+ w = w * b - 1e4 * (1 - b)
120
+
121
+ if attention_mask is not None:
122
+ # Apply the attention mask
123
+ attention_mask = tf.cast(attention_mask, dtype=w.dtype)
124
+ w = w + attention_mask
125
+
126
+ w = stable_softmax(w, axis=-1)
127
+ w = self.attn_dropout(w, training=training)
128
+
129
+ # Mask heads if we want to
130
+ if head_mask is not None:
131
+ w = w * head_mask
132
+
133
+ outputs = [tf.matmul(w, v)]
134
+ if output_attentions:
135
+ outputs.append(w)
136
+ return outputs
137
+
138
+ def merge_heads(self, x):
139
+ x = tf.transpose(x, [0, 2, 1, 3])
140
+ x_shape = shape_list(x)
141
+ new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
142
+ return tf.reshape(x, new_x_shape)
143
+
144
+ def split_heads(self, x):
145
+ x_shape = shape_list(x)
146
+ new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
147
+ x = tf.reshape(x, new_x_shape)
148
+ return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
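A shape-level sketch of the two helpers, using illustrative sizes (`n_head=12`, `n_embd=768`) rather than anything read from a config:

```python
import tensorflow as tf

batch, seq, n_embd, n_head = 2, 5, 768, 12  # illustrative sizes
x = tf.random.normal((batch, seq, n_embd))

# split_heads: (batch, seq, n_embd) -> (batch, n_head, seq, n_embd // n_head)
split = tf.transpose(tf.reshape(x, (batch, seq, n_head, n_embd // n_head)), (0, 2, 1, 3))
print(split.shape)  # (2, 12, 5, 64)

# merge_heads: the inverse transpose + reshape restores (batch, seq, n_embd)
merged = tf.reshape(tf.transpose(split, (0, 2, 1, 3)), (batch, seq, n_embd))
print(merged.shape)  # (2, 5, 768)
```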
149
+
150
+ def call(
151
+ self,
152
+ x,
153
+ layer_past,
154
+ attention_mask,
155
+ head_mask,
156
+ encoder_hidden_states,
157
+ encoder_attention_mask,
158
+ use_cache,
159
+ output_attentions,
160
+ training=False,
161
+ ):
162
+ if encoder_hidden_states is not None:
163
+ if not hasattr(self, "q_attn"):
164
+ raise ValueError(
165
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
166
+ "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
167
+ )
168
+
169
+ query = self.q_attn(x)
170
+ kv_out = self.c_attn(encoder_hidden_states)
171
+ key, value = tf.split(kv_out, 2, axis=2)
172
+ attention_mask = encoder_attention_mask
173
+ else:
174
+ x = self.c_attn(x)
175
+ query, key, value = tf.split(x, 3, axis=2)
176
+
177
+ query = self.split_heads(query)
178
+ key = self.split_heads(key)
179
+ value = self.split_heads(value)
180
+ if layer_past is not None:
181
+ past_key, past_value = tf.unstack(layer_past, axis=0, num=2)
182
+ key = tf.concat([past_key, key], axis=-2)
183
+ value = tf.concat([past_value, value], axis=-2)
184
+
185
+ # to cope with keras serialization
186
+ if use_cache:
187
+ present = tf.stack([key, value], axis=0)
188
+ else:
189
+ present = (None,)
190
+
191
+ attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training)
192
+ a = attn_outputs[0]
193
+
194
+ a = self.merge_heads(a)
195
+ a = self.c_proj(a)
196
+ a = self.resid_dropout(a, training=training)
197
+
198
+ outputs = [a, present] + attn_outputs[1:]
199
+ return outputs # a, present, (attentions)
200
+
201
+ def build(self, input_shape=None):
202
+ if self.built:
203
+ return
204
+ self.built = True
205
+ if self.is_cross_attention:
206
+ c_attn_shape = 2 * self.embed_dim
207
+ else:
208
+ c_attn_shape = 3 * self.embed_dim
209
+ if getattr(self, "c_proj", None) is not None:
210
+ with tf.name_scope(self.c_proj.name):
211
+ self.c_proj.build([None, None, self.embed_dim])
212
+ if getattr(self, "c_attn", None) is not None:
213
+ with tf.name_scope(self.c_attn.name):
214
+ self.c_attn.build([None, None, c_attn_shape])
215
+ if getattr(self, "q_attn", None) is not None:
216
+ with tf.name_scope(self.q_attn.name):
217
+ self.q_attn.build([None, None, self.embed_dim])
218
+
219
+
220
+ class TFMLP(keras.layers.Layer):
221
+ def __init__(self, n_state, config, **kwargs):
222
+ super().__init__(**kwargs)
223
+ nx = config.n_embd
224
+ self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
225
+ self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
226
+ self.act = get_tf_activation(config.activation_function)
227
+ self.dropout = keras.layers.Dropout(config.resid_pdrop)
228
+ self.intermediate_size = n_state
229
+ self.embed_dim = nx
230
+
231
+ def call(self, x, training=False):
232
+ h = self.act(self.c_fc(x))
233
+ h2 = self.c_proj(h)
234
+ h2 = self.dropout(h2, training=training)
235
+ return h2
236
+
237
+ def build(self, input_shape=None):
238
+ if self.built:
239
+ return
240
+ self.built = True
241
+ if getattr(self, "c_fc", None) is not None:
242
+ with tf.name_scope(self.c_fc.name):
243
+ self.c_fc.build([None, None, self.intermediate_size])
244
+ if getattr(self, "c_proj", None) is not None:
245
+ with tf.name_scope(self.c_proj.name):
246
+ self.c_proj.build([None, None, self.embed_dim])
247
+
248
+
249
+ class TFBlock(keras.layers.Layer):
250
+ def __init__(self, config, scale=False, **kwargs):
251
+ super().__init__(**kwargs)
252
+ nx = config.n_embd
253
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * nx
254
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
255
+ self.attn = TFAttention(nx, config, scale, name="attn")
256
+ self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
257
+
258
+ if config.add_cross_attention:
259
+ self.crossattention = TFAttention(nx, config, scale, name="crossattention", is_cross_attention=True)
260
+ self.ln_cross_attn = keras.layers.LayerNormalization(
261
+ epsilon=config.layer_norm_epsilon, name="ln_cross_attn"
262
+ )
263
+
264
+ self.mlp = TFMLP(inner_dim, config, name="mlp")
265
+ self.hidden_size = config.hidden_size
266
+
267
+ def call(
268
+ self,
269
+ x,
270
+ layer_past,
271
+ attention_mask,
272
+ head_mask,
273
+ encoder_hidden_states,
274
+ encoder_attention_mask,
275
+ use_cache,
276
+ output_attentions,
277
+ training=False,
278
+ ):
279
+ a = self.ln_1(x)
280
+ output_attn = self.attn(
281
+ a,
282
+ layer_past=layer_past,
283
+ attention_mask=attention_mask,
284
+ head_mask=head_mask,
285
+ encoder_hidden_states=None,
286
+ encoder_attention_mask=None,
287
+ use_cache=use_cache,
288
+ output_attentions=output_attentions,
289
+ training=training,
290
+ )
291
+ a = output_attn[0] # output_attn: a, present, (attentions)
292
+ outputs = output_attn[1:]
293
+ x = x + a
294
+
295
+ # Cross-Attention Block
296
+ if encoder_hidden_states is not None:
297
+ # add one self-attention block for cross-attention
298
+ if not hasattr(self, "crossattention"):
299
+ raise ValueError(
300
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
301
+ "cross-attention layers by setting `config.add_cross_attention=True`"
302
+ )
303
+
304
+ ca = self.ln_cross_attn(x)
305
+ output_cross_attn = self.crossattention(
306
+ ca,
307
+ layer_past=None,
308
+ attention_mask=attention_mask,
309
+ head_mask=head_mask,
310
+ encoder_hidden_states=encoder_hidden_states,
311
+ encoder_attention_mask=encoder_attention_mask,
312
+ use_cache=False,
313
+ output_attentions=output_attentions,
314
+ training=training,
315
+ )
316
+ ca = output_cross_attn[0] # output_attn: a, present, (cross_attentions)
317
+ x = x + ca
318
+ outputs = outputs + output_cross_attn[2:] # add cross attentions if we output attention weights
319
+
320
+ m = self.ln_2(x)
321
+ m = self.mlp(m, training=training)
322
+ x = x + m
323
+
324
+ outputs = [x] + outputs
325
+ return outputs # x, present, (attentions, cross_attentions)
326
+
327
+ def build(self, input_shape=None):
328
+ if self.built:
329
+ return
330
+ self.built = True
331
+ if getattr(self, "ln_1", None) is not None:
332
+ with tf.name_scope(self.ln_1.name):
333
+ self.ln_1.build([None, None, self.hidden_size])
334
+ if getattr(self, "attn", None) is not None:
335
+ with tf.name_scope(self.attn.name):
336
+ self.attn.build(None)
337
+ if getattr(self, "ln_2", None) is not None:
338
+ with tf.name_scope(self.ln_2.name):
339
+ self.ln_2.build([None, None, self.hidden_size])
340
+ if getattr(self, "mlp", None) is not None:
341
+ with tf.name_scope(self.mlp.name):
342
+ self.mlp.build(None)
343
+ if getattr(self, "crossattention", None) is not None:
344
+ with tf.name_scope(self.crossattention.name):
345
+ self.crossattention.build(None)
346
+ if getattr(self, "ln_cross_attn", None) is not None:
347
+ with tf.name_scope(self.ln_cross_attn.name):
348
+ self.ln_cross_attn.build([None, None, self.hidden_size])
349
+
350
+
351
+ @keras_serializable
352
+ class TFGPT2MainLayer(keras.layers.Layer):
353
+ config_class = GPT2Config
354
+
355
+ def __init__(self, config, *inputs, **kwargs):
356
+ super().__init__(*inputs, **kwargs)
357
+
358
+ self.config = config
359
+ self.output_attentions = config.output_attentions
360
+ self.output_hidden_states = config.output_hidden_states
361
+ self.use_cache = config.use_cache
362
+ self.return_dict = config.use_return_dict
363
+
364
+ self.num_hidden_layers = config.n_layer
365
+ self.n_embd = config.n_embd
366
+ self.n_positions = config.n_positions
367
+ self.initializer_range = config.initializer_range
368
+
369
+ self.wte = keras.layers.Embedding(
370
+ input_dim=config.vocab_size,
371
+ output_dim=config.hidden_size,
372
+ embeddings_initializer=get_initializer(config.initializer_range),
373
+ name="wte",
374
+ )
375
+ self.wpe = keras.layers.Embedding(
376
+ input_dim=config.n_positions,
377
+ output_dim=config.n_embd,
378
+ embeddings_initializer=get_initializer(config.initializer_range),
379
+ name="wpe",
380
+ )
381
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
382
+ self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)]
383
+ self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
384
+ self.embed_dim = config.hidden_size
385
+
386
+ def get_input_embeddings(self):
387
+ return self.wte
388
+
389
+ def set_input_embeddings(self, new_embeddings):
390
+ self.wte = new_embeddings
391
+
392
+ def _prune_heads(self, heads_to_prune):
393
+ """
394
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
395
+ """
396
+ raise NotImplementedError
397
+
398
+ @unpack_inputs
399
+ def call(
400
+ self,
401
+ input_ids: TFModelInputType | None = None,
402
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
403
+ attention_mask: np.ndarray | tf.Tensor | None = None,
404
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
405
+ position_ids: np.ndarray | tf.Tensor | None = None,
406
+ head_mask: np.ndarray | tf.Tensor | None = None,
407
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
408
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
409
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
410
+ use_cache: Optional[bool] = None,
411
+ output_attentions: Optional[bool] = None,
412
+ output_hidden_states: Optional[bool] = None,
413
+ return_dict: Optional[bool] = None,
414
+ training: Optional[bool] = False,
415
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
416
+ if input_ids is not None and inputs_embeds is not None:
417
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
418
+ elif input_ids is not None:
419
+ input_shape = shape_list(input_ids)
420
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
421
+ elif inputs_embeds is not None:
422
+ input_shape = shape_list(inputs_embeds)[:-1]
423
+ else:
424
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
425
+
426
+ if past_key_values is None:
427
+ past_length = 0
428
+ past_key_values = [None] * len(self.h)
429
+ else:
430
+ past_length = shape_list(past_key_values[0][0])[-2]
431
+
432
+ if position_ids is None:
433
+ position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
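For example, with a hypothetical cache of three tokens and a single new input id, the default position ids continue where the cache left off:

```python
import tensorflow as tf

past_length, new_tokens = 3, 1  # illustrative: 3 cached positions, 1 new token
position_ids = tf.expand_dims(tf.range(past_length, new_tokens + past_length), axis=0)
print(position_ids.numpy())  # [[3]]

# without a cache the same expression yields [[0 1 2 3]] for a 4-token input
print(tf.expand_dims(tf.range(0, 4), axis=0).numpy())  # [[0 1 2 3]]
```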
434
+
435
+ if attention_mask is not None:
436
+ # We create a 3D attention mask from a 2D tensor mask.
437
+ # Sizes are [batch_size, 1, 1, to_seq_length]
438
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
439
+ # this attention mask is simpler than the triangular masking of causal attention
440
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
441
+ attention_mask_shape = shape_list(attention_mask)
442
+ attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
443
+
444
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
445
+ # masked positions, this operation will create a tensor which is 0.0 for
446
+ # positions we want to attend and -10000.0 for masked positions.
447
+ # Since we are adding it to the raw scores before the softmax, this is
448
+ # effectively the same as removing these entirely.
449
+ one_cst = tf.constant(1.0)
450
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
451
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
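Concretely, a mask of `[1, 1, 1, 0]` becomes an additive bias of roughly `[0, 0, 0, -10000]`, which suppresses the padded position in the softmax. A minimal sketch:

```python
import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])  # 1 = attend, 0 = padding
extended = tf.reshape(attention_mask, (1, 1, 1, 4))   # broadcastable over heads and query positions
additive_bias = (1.0 - extended) * -10000.0
print(additive_bias.numpy().reshape(-1))              # padded position gets -10000.0, the rest ~0.0
```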
452
+
453
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
454
+ if self.config.add_cross_attention and encoder_attention_mask is not None:
455
+ # If a 2D or 3D attention mask is provided for the cross-attention,
457
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
458
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=encoder_hidden_states.dtype)
459
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
460
+ if num_dims_encoder_attention_mask == 3:
461
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
462
+ if num_dims_encoder_attention_mask == 2:
463
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
464
+
465
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
466
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
467
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
468
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
469
+
470
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
471
+ else:
472
+ encoder_extended_attention_mask = None
473
+
474
+ encoder_attention_mask = encoder_extended_attention_mask
475
+
476
+ # Prepare head mask if needed
477
+ # 1.0 in head_mask indicate we keep the head
478
+ # attention_probs has shape bsz x n_heads x N x N
479
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
480
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
481
+ if head_mask is not None:
482
+ raise NotImplementedError
483
+ else:
484
+ head_mask = [None] * self.num_hidden_layers
485
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
486
+
487
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
488
+
489
+ if inputs_embeds is None:
490
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
491
+ inputs_embeds = self.wte(input_ids)
492
+
493
+ position_embeds = self.wpe(position_ids)
494
+
495
+ if token_type_ids is not None:
496
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
497
+ token_type_embeds = self.wte(token_type_ids)
498
+ else:
499
+ token_type_embeds = tf.constant(0.0)
500
+
501
+ position_embeds = tf.cast(position_embeds, dtype=inputs_embeds.dtype)
502
+ token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
503
+ hidden_states = inputs_embeds + position_embeds + token_type_embeds
504
+ hidden_states = self.drop(hidden_states, training=training)
505
+
506
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
507
+
508
+ presents = () if use_cache else None
509
+ all_attentions = () if output_attentions else None
510
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
511
+ all_hidden_states = () if output_hidden_states else None
512
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
513
+ if output_hidden_states:
514
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
515
+
516
+ outputs = block(
517
+ hidden_states,
518
+ layer_past,
519
+ attention_mask,
520
+ head_mask[i],
521
+ encoder_hidden_states,
522
+ encoder_attention_mask,
523
+ use_cache,
524
+ output_attentions,
525
+ training=training,
526
+ )
527
+
528
+ hidden_states, present = outputs[:2]
529
+ if use_cache:
530
+ presents = presents + (present,)
531
+
532
+ if output_attentions:
533
+ all_attentions = all_attentions + (outputs[2],)
534
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
535
+ all_cross_attentions = all_cross_attentions + (outputs[3],)
536
+
537
+ hidden_states = self.ln_f(hidden_states)
538
+
539
+ hidden_states = tf.reshape(hidden_states, output_shape)
540
+ # Add last hidden state
541
+ if output_hidden_states:
542
+ all_hidden_states = all_hidden_states + (hidden_states,)
543
+
544
+ if output_attentions:
545
+ # let the number of heads free (-1) so we can extract attention even after head pruning
546
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
547
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
548
+
549
+ if not return_dict:
550
+ return tuple(
551
+ v
552
+ for v in [hidden_states, presents, all_hidden_states, all_attentions, all_cross_attentions]
553
+ if v is not None
554
+ )
555
+
556
+ return TFBaseModelOutputWithPastAndCrossAttentions(
557
+ last_hidden_state=hidden_states,
558
+ past_key_values=presents,
559
+ hidden_states=all_hidden_states,
560
+ attentions=all_attentions,
561
+ cross_attentions=all_cross_attentions,
562
+ )
563
+
564
+ def build(self, input_shape=None):
565
+ if self.built:
566
+ return
567
+ self.built = True
568
+ if getattr(self, "wte", None) is not None:
569
+ with tf.name_scope(self.wte.name):
570
+ self.wte.build(None)
571
+ if getattr(self, "wpe", None) is not None:
572
+ with tf.name_scope(self.wpe.name):
573
+ self.wpe.build(None)
574
+ if getattr(self, "ln_f", None) is not None:
575
+ with tf.name_scope(self.ln_f.name):
576
+ self.ln_f.build([None, None, self.embed_dim])
577
+ if getattr(self, "h", None) is not None:
578
+ for layer in self.h:
579
+ with tf.name_scope(layer.name):
580
+ layer.build(None)
581
+
582
+
583
+ class TFGPT2PreTrainedModel(TFPreTrainedModel):
584
+ """
585
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
586
+ models.
587
+ """
588
+
589
+ config_class = GPT2Config
590
+ base_model_prefix = "transformer"
591
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
592
+ _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias", r"h.\d+.crossattention.bias"]
593
+
594
+ @property
595
+ def input_signature(self):
596
+ # Although GPT-2 supports token_type_ids in theory, in practice they are rarely used, and the implementation
597
+ # means that passing token_type_ids=0 yields different outputs from token_type_ids=None.
598
+ # Therefore, we remove the token_type_ids argument by default, even though it would usually be included.
599
+ return {
600
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
601
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
602
+ }
603
+
604
+
605
+ @dataclass
606
+ class TFGPT2DoubleHeadsModelOutput(ModelOutput):
607
+ """
608
+ Base class for outputs of models with a language modeling head and a multiple-choice classification head.
609
+
610
+ Args:
611
+ logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
612
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
613
+ mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
614
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
615
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
616
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
617
+ sequence_length, embed_size_per_head)`.
618
+
619
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
620
+ `past_key_values` input) to speed up sequential decoding.
621
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
622
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
623
+ `(batch_size, sequence_length, hidden_size)`.
624
+
625
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
626
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
627
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
628
+ sequence_length)`.
629
+
630
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
631
+ heads.
632
+ """
633
+
634
+ logits: tf.Tensor = None
635
+ mc_logits: tf.Tensor = None
636
+ past_key_values: List[tf.Tensor] | None = None
637
+ hidden_states: Tuple[tf.Tensor] | None = None
638
+ attentions: Tuple[tf.Tensor] | None = None
639
+
640
+
641
+ GPT2_START_DOCSTRING = r"""
642
+
643
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
644
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
645
+ etc.)
646
+
647
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
648
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
649
+ behavior.
650
+
651
+ <Tip>
652
+
653
+ TensorFlow models and layers in `transformers` accept two formats as input:
654
+
655
+ - having all inputs as keyword arguments (like PyTorch models), or
656
+ - having all inputs as a list, tuple or dict in the first positional argument.
657
+
658
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
659
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
660
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
661
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
662
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
663
+ positional argument:
664
+
665
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
666
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
667
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
668
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
669
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
670
+
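    For instance, the following calls feed the same inputs in the three supported ways (a minimal sketch; the ids spell "Hello world"):

```python
import tensorflow as tf
from transformers import TFGPT2Model

model = TFGPT2Model.from_pretrained("openai-community/gpt2")
input_ids = tf.constant([[15496, 995]])       # token ids for "Hello world"
attention_mask = tf.constant([[1, 1]])

out = model(input_ids)                                                    # single tensor
out = model([input_ids, attention_mask])                                  # list, in docstring order
out = model({"input_ids": input_ids, "attention_mask": attention_mask})   # dict keyed by input names
```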
671
+ Note that when creating models and layers with
672
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
673
+ about any of this, as you can just pass inputs like you would to any other Python function!
674
+
675
+ </Tip>
676
+
677
+ Parameters:
678
+ config ([`GPT2Config`]): Model configuration class with all the parameters of the model.
679
+ Initializing with a config file does not load the weights associated with the model, only the
680
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
681
+ """
682
+
683
+ GPT2_INPUTS_DOCSTRING = r"""
684
+ Args:
685
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
686
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
687
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
688
+
689
+ If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
690
+ `input_ids`.
691
+
692
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
693
+ [`PreTrainedTokenizer.encode`] for details.
694
+
695
+ [What are input IDs?](../glossary#input-ids)
696
+ past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
697
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
698
+ `past_key_values` output below). Can be used to speed up sequential decoding. The token ids which have
699
+ their past given to this model should not be passed as input ids as they have already been computed.
700
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
701
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
702
+
703
+ - 1 for tokens that are **not masked**,
704
+ - 0 for tokens that are **masked**.
705
+
706
+ If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
707
+ `past_key_values`. In other words, the `attention_mask` always has to have the length:
708
+ `len(past_key_values) + len(input_ids)`
709
+
710
+ [What are attention masks?](../glossary#attention-mask)
711
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
712
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
713
+ 1]`:
714
+
715
+ - 0 corresponds to a *sentence A* token,
716
+ - 1 corresponds to a *sentence B* token.
717
+
718
+ [What are token type IDs?](../glossary#token-type-ids)
719
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
720
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
721
+ config.max_position_embeddings - 1]`.
722
+
723
+ [What are position IDs?](../glossary#position-ids)
724
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
725
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
726
+
727
+ - 1 indicates the head is **not masked**,
728
+ - 0 indicates the head is **masked**.
729
+
730
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
731
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
732
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
733
+ model's internal embedding lookup matrix.
734
+ output_attentions (`bool`, *optional*):
735
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
736
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
737
+ config will be used instead.
738
+ output_hidden_states (`bool`, *optional*):
739
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
740
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
741
+ used instead.
742
+ return_dict (`bool`, *optional*):
743
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
744
+ eager mode, in graph mode the value will always be set to True.
745
+ training (`bool`, *optional*, defaults to `False`):
746
+ Whether or not to use the model in training mode (some modules like dropout modules have different
747
+ behaviors between training and evaluation).
748
+ """
749
+
750
+
751
+ @add_start_docstrings(
752
+ "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
753
+ GPT2_START_DOCSTRING,
754
+ )
755
+ class TFGPT2Model(TFGPT2PreTrainedModel):
756
+ def __init__(self, config, *inputs, **kwargs):
757
+ super().__init__(config, *inputs, **kwargs)
758
+ self.transformer = TFGPT2MainLayer(config, name="transformer")
759
+
760
+ @unpack_inputs
761
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
762
+ @add_code_sample_docstrings(
763
+ checkpoint=_CHECKPOINT_FOR_DOC,
764
+ output_type=TFBaseModelOutputWithPastAndCrossAttentions,
765
+ config_class=_CONFIG_FOR_DOC,
766
+ )
767
+ def call(
768
+ self,
769
+ input_ids: TFModelInputType | None = None,
770
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
771
+ attention_mask: np.ndarray | tf.Tensor | None = None,
772
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
773
+ position_ids: np.ndarray | tf.Tensor | None = None,
774
+ head_mask: np.ndarray | tf.Tensor | None = None,
775
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
776
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
777
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
778
+ use_cache: Optional[bool] = None,
779
+ output_attentions: Optional[bool] = None,
780
+ output_hidden_states: Optional[bool] = None,
781
+ return_dict: Optional[bool] = None,
782
+ training: Optional[bool] = False,
783
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
784
+ r"""
785
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
786
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
787
+ the model is configured as a decoder.
788
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
789
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
790
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
791
+
792
+ - 1 for tokens that are **not masked**,
793
+ - 0 for tokens that are **masked**.
794
+
795
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
796
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
797
+ If `past_key_values` is used, the user can optionally input only the last `decoder_input_ids` (those that don't have
798
+ their past key value states given to this model) of shape `(batch_size, 1)` instead of all
799
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
800
+ use_cache (`bool`, *optional*, defaults to `True`):
801
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
802
+ `past_key_values`). Set to `False` during training and `True` during generation.
803
+ """
804
+
805
+ outputs = self.transformer(
806
+ input_ids=input_ids,
807
+ past_key_values=past_key_values,
808
+ attention_mask=attention_mask,
809
+ token_type_ids=token_type_ids,
810
+ position_ids=position_ids,
811
+ head_mask=head_mask,
812
+ inputs_embeds=inputs_embeds,
813
+ encoder_hidden_states=encoder_hidden_states,
814
+ encoder_attention_mask=encoder_attention_mask,
815
+ use_cache=use_cache,
816
+ output_attentions=output_attentions,
817
+ output_hidden_states=output_hidden_states,
818
+ return_dict=return_dict,
819
+ training=training,
820
+ )
821
+
822
+ return outputs
823
+
824
+ def build(self, input_shape=None):
825
+ if self.built:
826
+ return
827
+ self.built = True
828
+ if getattr(self, "transformer", None) is not None:
829
+ with tf.name_scope(self.transformer.name):
830
+ self.transformer.build(None)
831
+
832
+
833
+ @add_start_docstrings(
834
+ """
835
+ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
836
+ embeddings).
837
+ """,
838
+ GPT2_START_DOCSTRING,
839
+ )
840
+ class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss):
841
+ def __init__(self, config, *inputs, **kwargs):
842
+ super().__init__(config, *inputs, **kwargs)
843
+ self.transformer = TFGPT2MainLayer(config, name="transformer")
844
+
845
+ def get_output_embeddings(self):
846
+ return self.get_input_embeddings()
847
+
848
+ def set_output_embeddings(self, value):
849
+ self.set_input_embeddings(value)
850
+
851
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
852
+ token_type_ids = kwargs.get("token_type_ids", None)
853
+ # only last token for inputs_ids if past is defined in kwargs
854
+ if past_key_values:
855
+ inputs = tf.expand_dims(inputs[:, -1], -1)
856
+ if token_type_ids is not None:
857
+ token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
858
+
859
+ position_ids = kwargs.get("position_ids", None)
860
+ attention_mask = kwargs.get("attention_mask", None)
861
+
862
+ if attention_mask is not None and position_ids is None:
863
+ position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
864
+ if past_key_values:
865
+ position_ids = tf.expand_dims(position_ids[:, -1], -1)
866
+
867
+ return {
868
+ "input_ids": inputs,
869
+ "attention_mask": attention_mask,
870
+ "position_ids": position_ids,
871
+ "past_key_values": past_key_values,
872
+ "use_cache": use_cache,
873
+ "token_type_ids": token_type_ids,
874
+ }
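The exclusive cumulative sum turns a (possibly left-padded) attention mask into position ids, and once a cache exists only the last position is kept. Sketched here with a hypothetical left-padded batch:

```python
import tensorflow as tf

# hypothetical left-padded mask: two padding tokens followed by three real tokens
attention_mask = tf.constant([[0, 0, 1, 1, 1]])
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
print(position_ids.numpy())  # [[0 0 0 1 2]]

# once past_key_values exist, only the last column is fed for the next step
print(tf.expand_dims(position_ids[:, -1], -1).numpy())  # [[2]]
```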
875
+
876
+ @unpack_inputs
877
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
878
+ @add_code_sample_docstrings(
879
+ checkpoint=_CHECKPOINT_FOR_DOC,
880
+ output_type=TFCausalLMOutputWithCrossAttentions,
881
+ config_class=_CONFIG_FOR_DOC,
882
+ )
883
+ def call(
884
+ self,
885
+ input_ids: TFModelInputType | None = None,
886
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
887
+ attention_mask: np.ndarray | tf.Tensor | None = None,
888
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
889
+ position_ids: np.ndarray | tf.Tensor | None = None,
890
+ head_mask: np.ndarray | tf.Tensor | None = None,
891
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
892
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
893
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
894
+ use_cache: Optional[bool] = None,
895
+ output_attentions: Optional[bool] = None,
896
+ output_hidden_states: Optional[bool] = None,
897
+ return_dict: Optional[bool] = None,
898
+ labels: np.ndarray | tf.Tensor | None = None,
899
+ training: Optional[bool] = False,
900
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
901
+ r"""
902
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
903
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
904
+ the model is configured as a decoder.
905
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
906
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
907
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
908
+
909
+ - 1 for tokens that are **not masked**,
910
+ - 0 for tokens that are **masked**.
911
+
912
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
913
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
914
+ If `past_key_values` is used, the user can optionally input only the last `decoder_input_ids` (those that don't have
915
+ their past key value states given to this model) of shape `(batch_size, 1)` instead of all
916
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
917
+ use_cache (`bool`, *optional*, defaults to `True`):
918
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
919
+ `past_key_values`). Set to `False` during training and `True` during generation.
920
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
921
+ Labels for computing the language modeling loss (cross entropy). Indices should be in `[0, ...,
922
+ config.vocab_size - 1]`.
923
+ """
924
+
925
+ transformer_outputs = self.transformer(
926
+ input_ids=input_ids,
927
+ past_key_values=past_key_values,
928
+ attention_mask=attention_mask,
929
+ token_type_ids=token_type_ids,
930
+ position_ids=position_ids,
931
+ head_mask=head_mask,
932
+ inputs_embeds=inputs_embeds,
933
+ encoder_hidden_states=encoder_hidden_states,
934
+ encoder_attention_mask=encoder_attention_mask,
935
+ use_cache=use_cache,
936
+ output_attentions=output_attentions,
937
+ output_hidden_states=output_hidden_states,
938
+ return_dict=return_dict,
939
+ training=training,
940
+ )
941
+ hidden_states = transformer_outputs[0]
942
+ logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True)
943
+
944
+ loss = None
945
+ if labels is not None:
946
+ # shift labels to the left and cut last logit token
947
+ shifted_logits = logits[:, :-1]
948
+ labels = labels[:, 1:]
949
+ loss = self.hf_compute_loss(labels, shifted_logits)
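In other words, the logit at position *t* is scored against the label at position *t + 1*, dropping the last logit and the first label. A small sketch with hypothetical values:

```python
import tensorflow as tf

# hypothetical batch of one 4-token sequence and dummy logits over a 10-word vocab
labels = tf.constant([[11, 22, 33, 44]])
logits = tf.random.normal((1, 4, 10))

shifted_logits = logits[:, :-1]   # positions 0..2 predict the *next* token
shifted_labels = labels[:, 1:]    # targets 22, 33, 44
print(shifted_logits.shape, shifted_labels.numpy())  # (1, 3, 10) [[22 33 44]]
```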
950
+
951
+ if not return_dict:
952
+ output = (logits,) + transformer_outputs[1:]
953
+ return ((loss,) + output) if loss is not None else output
954
+
955
+ return TFCausalLMOutputWithCrossAttentions(
956
+ loss=loss,
957
+ logits=logits,
958
+ past_key_values=transformer_outputs.past_key_values,
959
+ hidden_states=transformer_outputs.hidden_states,
960
+ attentions=transformer_outputs.attentions,
961
+ cross_attentions=transformer_outputs.cross_attentions,
962
+ )
963
+
964
+ def build(self, input_shape=None):
965
+ if self.built:
966
+ return
967
+ self.built = True
968
+ if getattr(self, "transformer", None) is not None:
969
+ with tf.name_scope(self.transformer.name):
970
+ self.transformer.build(None)
971
+
972
+
973
+ @add_start_docstrings(
974
+ """
975
+ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
976
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
977
+ input embeddings; the classification head takes as input the hidden state at a specified classification token index in the
978
+ input sequence.
979
+ """,
980
+ GPT2_START_DOCSTRING,
981
+ )
982
+ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel):
983
+ def __init__(self, config, *inputs, **kwargs):
984
+ super().__init__(config, *inputs, **kwargs)
985
+ config.num_labels = 1
986
+ self.transformer = TFGPT2MainLayer(config, name="transformer")
987
+ self.multiple_choice_head = TFSequenceSummary(
988
+ config, initializer_range=config.initializer_range, name="multiple_choice_head"
989
+ )
990
+
991
+ @unpack_inputs
992
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
993
+ @replace_return_docstrings(output_type=TFGPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
994
+ def call(
995
+ self,
996
+ input_ids: TFModelInputType | None = None,
997
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
998
+ attention_mask: np.ndarray | tf.Tensor | None = None,
999
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1000
+ position_ids: np.ndarray | tf.Tensor | None = None,
1001
+ head_mask: np.ndarray | tf.Tensor | None = None,
1002
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1003
+ mc_token_ids: np.ndarray | tf.Tensor | None = None,
1004
+ use_cache: Optional[bool] = None,
1005
+ output_attentions: Optional[bool] = None,
1006
+ output_hidden_states: Optional[bool] = None,
1007
+ return_dict: Optional[bool] = None,
1008
+ training: Optional[bool] = False,
1009
+ ) -> Union[TFGPT2DoubleHeadsModelOutput, Tuple[tf.Tensor]]:
1010
+ r"""
1011
+ mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
1012
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1013
+ 1]`.
1014
+
1015
+ Return:
1016
+
1017
+ Examples:
1018
+
1019
+ ```python
1020
+ >>> import tensorflow as tf
1021
+ >>> from transformers import AutoTokenizer, TFGPT2DoubleHeadsModel
1022
+
1023
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
1024
+ >>> model = TFGPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2")
1025
+
1026
+ >>> # Add a [CLS] to the vocabulary (we should train it also!)
1027
+ >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
1028
+
1029
+ >>> embedding_layer = model.resize_token_embeddings(
1030
+ ... len(tokenizer)
1031
+ ... ) # Update the model embeddings with the new vocabulary size
1032
+
1033
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
1034
+ >>> encoded_choices = [tokenizer.encode(s) for s in choices]
1035
+ >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
1036
+
1037
+ >>> input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2
1038
+ >>> mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1
1039
+
1040
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
1041
+ >>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
1042
+ ```"""
1043
+
1044
+ if input_ids is not None:
1045
+ input_shapes = shape_list(input_ids)
1046
+ else:
1047
+ input_shapes = shape_list(inputs_embeds)[:-1]
1048
+
1049
+ seq_length = input_shapes[-1]
1050
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1051
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1052
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1053
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1054
+ transformer_outputs = self.transformer(
1055
+ input_ids=flat_input_ids,
1056
+ past_key_values=past_key_values,
1057
+ attention_mask=flat_attention_mask,
1058
+ token_type_ids=flat_token_type_ids,
1059
+ position_ids=flat_position_ids,
1060
+ head_mask=head_mask,
1061
+ inputs_embeds=inputs_embeds,
1062
+ encoder_hidden_states=None,
1063
+ encoder_attention_mask=None,
1064
+ use_cache=use_cache,
1065
+ output_attentions=output_attentions,
1066
+ output_hidden_states=output_hidden_states,
1067
+ return_dict=return_dict,
1068
+ training=training,
1069
+ )
1070
+ hidden_states = transformer_outputs[0]
1071
+ hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
1072
+ if return_dict and output_hidden_states:
1073
+ # We do this to match the slightly odd PT behaviour - the final hidden state is reshaped to rank 4 when the
1074
+ # input is rank 3, but all other hidden states remain at rank-3 (with the first 2 dims merged)
1075
+ all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,)
1076
+ else:
1077
+ all_hidden_states = None
1078
+ lm_logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True)
1079
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training)
1080
+ mc_logits = tf.squeeze(mc_logits, axis=-1)
1081
+
1082
+ if not return_dict:
1083
+ return (lm_logits, mc_logits) + transformer_outputs[1:]
1084
+
1085
+ return TFGPT2DoubleHeadsModelOutput(
1086
+ logits=lm_logits,
1087
+ mc_logits=mc_logits,
1088
+ past_key_values=transformer_outputs.past_key_values,
1089
+ hidden_states=all_hidden_states,
1090
+ attentions=transformer_outputs.attentions,
1091
+ )
1092
+
1093
+ @property
1094
+ def input_signature(self):
1095
+ return {
1096
+ "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
1097
+ "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
1098
+ "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="mc_token_ids"),
1099
+ }
1100
+
1101
+ def build(self, input_shape=None):
1102
+ if self.built:
1103
+ return
1104
+ self.built = True
1105
+ if getattr(self, "transformer", None) is not None:
1106
+ with tf.name_scope(self.transformer.name):
1107
+ self.transformer.build(None)
1108
+ if getattr(self, "multiple_choice_head", None) is not None:
1109
+ with tf.name_scope(self.multiple_choice_head.name):
1110
+ self.multiple_choice_head.build(None)
1111
+
1112
+
1113
+ @add_start_docstrings(
1114
+ """
1115
+ The GPT2 Model transformer with a sequence classification head on top (linear layer).
1116
+
1117
+ [`TFGPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1118
+ (e.g. GPT-1) do.
1119
+
1120
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1121
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1122
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1123
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1124
+ each row of the batch).
1125
+ """,
1126
+ GPT2_START_DOCSTRING,
1127
+ )
1128
+ class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassificationLoss):
1129
+ def __init__(self, config, *inputs, **kwargs):
1130
+ super().__init__(config, *inputs, **kwargs)
1131
+ self.num_labels = config.num_labels
1132
+ self.score = keras.layers.Dense(
1133
+ config.num_labels,
1134
+ kernel_initializer=get_initializer(config.initializer_range),
1135
+ name="score",
1136
+ use_bias=False,
1137
+ )
1138
+ self.transformer = TFGPT2MainLayer(config, name="transformer")
1139
+ self.config = config
1140
+
1141
+ @unpack_inputs
1142
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1143
+ @add_code_sample_docstrings(
1144
+ checkpoint="microsoft/DialogRPT-updown",
1145
+ output_type=TFSequenceClassifierOutputWithPast,
1146
+ config_class=_CONFIG_FOR_DOC,
1147
+ )
1148
+ def call(
1149
+ self,
1150
+ input_ids: TFModelInputType | None = None,
1151
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1152
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1153
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1154
+ position_ids: np.ndarray | tf.Tensor | None = None,
1155
+ head_mask: np.ndarray | tf.Tensor | None = None,
1156
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1157
+ use_cache: Optional[bool] = None,
1158
+ output_attentions: Optional[bool] = None,
1159
+ output_hidden_states: Optional[bool] = None,
1160
+ return_dict: Optional[bool] = None,
1161
+ labels: np.ndarray | tf.Tensor | None = None,
1162
+ training: Optional[bool] = False,
1163
+ ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
1164
+ r"""
1165
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1166
+ Labels for computing the sequence classification loss. Indices should be in `[0, ...,
1167
+ config.num_labels - 1]`.
1168
+ """
1169
+ transformer_outputs = self.transformer(
1170
+ input_ids=input_ids,
1171
+ past_key_values=past_key_values,
1172
+ attention_mask=attention_mask,
1173
+ token_type_ids=token_type_ids,
1174
+ position_ids=position_ids,
1175
+ head_mask=head_mask,
1176
+ inputs_embeds=inputs_embeds,
1177
+ use_cache=use_cache,
1178
+ output_attentions=output_attentions,
1179
+ output_hidden_states=output_hidden_states,
1180
+ return_dict=return_dict,
1181
+ training=training,
1182
+ )
1183
+
1184
+ hidden_states = transformer_outputs[0]
1185
+ logits = self.score(hidden_states)
1186
+ logits_shape = shape_list(logits)
1187
+ in_logits = None
1188
+ if self.config.pad_token_id is None:
1189
+ sequence_lengths = -1
1190
+ else:
1191
+ if input_ids is not None:
1192
+ sequence_lengths = (
1193
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
1194
+ - 1
1195
+ )
1196
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
1197
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
1198
+ else:
1199
+ sequence_lengths = -1
1200
+ logger.warning(
1201
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1202
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1203
+ )
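The `argmax`-over-equality trick above locates the last non-padding token in each row, falling back to the final position when a row contains no padding. A small sketch with a hypothetical `pad_token_id` of 0:

```python
import tensorflow as tf

pad_token_id = 0  # hypothetical padding id
input_ids = tf.constant([[5, 7, 9, 0, 0],    # padded row
                         [3, 4, 6, 8, 2]])   # full row, no padding
sequence_lengths = (
    tf.argmax(tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype), axis=-1) - 1
)
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
print(sequence_lengths.numpy())  # [2 4] -> index of the last real token in each row
```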
1204
+ loss = None
1205
+
1206
+ if labels is not None:
1207
+ assert (
1208
+ self.config.pad_token_id is not None or logits_shape[0] == 1
1209
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1210
+
1211
+ if not tf.is_tensor(sequence_lengths):
1212
+ in_logits = logits[0 : logits_shape[0], sequence_lengths]
1213
+
1214
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
1215
+ pooled_logits = in_logits if in_logits is not None else logits
1216
+
1217
+ if not return_dict:
1218
+ output = (pooled_logits,) + transformer_outputs[1:]
1219
+ return ((loss,) + output) if loss is not None else output
1220
+
1221
+ return TFSequenceClassifierOutputWithPast(
1222
+ loss=loss,
1223
+ logits=pooled_logits,
1224
+ past_key_values=transformer_outputs.past_key_values,
1225
+ hidden_states=transformer_outputs.hidden_states,
1226
+ attentions=transformer_outputs.attentions,
1227
+ )
1228
+
1229
+ def build(self, input_shape=None):
1230
+ if self.built:
1231
+ return
1232
+ self.built = True
1233
+ if getattr(self, "score", None) is not None:
1234
+ with tf.name_scope(self.score.name):
1235
+ self.score.build([None, None, self.config.n_embd])
1236
+ if getattr(self, "transformer", None) is not None:
1237
+ with tf.name_scope(self.transformer.name):
1238
+ self.transformer.build(None)
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py ADDED
@@ -0,0 +1,345 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for OpenAI GPT."""
16
+
17
+
18
+ import json
19
+ import os
20
+ from functools import lru_cache
21
+ from typing import List, Optional, Tuple
22
+
23
+ import regex as re
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+
37
+ @lru_cache()
38
+ def bytes_to_unicode():
39
+ """
40
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
41
+ characters that the bpe code barfs on.
42
+
43
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
44
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
45
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
46
+ tables between utf-8 bytes and unicode strings.
47
+ """
48
+ bs = (
49
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
50
+ )
51
+ cs = bs[:]
52
+ n = 0
53
+ for b in range(2**8):
54
+ if b not in bs:
55
+ bs.append(b)
56
+ cs.append(2**8 + n)
57
+ n += 1
58
+ cs = [chr(n) for n in cs]
59
+ return dict(zip(bs, cs))
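For example, printable ASCII bytes map to themselves, while a space byte is remapped to a printable stand-in (the familiar `Ġ`), so no token string ever contains raw whitespace:

```python
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode

byte_encoder = bytes_to_unicode()
print(byte_encoder[ord("A")])   # 'A'  (printable bytes map to themselves)
print(byte_encoder[ord(" ")])   # 'Ġ'  (space is shifted into a printable code point)
```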
60
+
61
+
62
+ def get_pairs(word):
63
+ """
64
+ Return set of symbol pairs in a word.
65
+
66
+ Word is represented as tuple of symbols (symbols being variable-length strings).
67
+ """
68
+ pairs = set()
69
+ prev_char = word[0]
70
+ for char in word[1:]:
71
+ pairs.add((prev_char, char))
72
+ prev_char = char
73
+ return pairs
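For instance, the word `('l', 'o', 'w', 'e', 'r')` yields the four adjacent symbol pairs that the BPE loop then ranks:

```python
from transformers.models.gpt2.tokenization_gpt2 import get_pairs

print(get_pairs(("l", "o", "w", "e", "r")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}  (set order may vary)
```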
74
+
75
+
76
+ class GPT2Tokenizer(PreTrainedTokenizer):
77
+ """
78
+ Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.
79
+
80
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
81
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
82
+
83
+ ```python
84
+ >>> from transformers import GPT2Tokenizer
85
+
86
+ >>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
87
+ >>> tokenizer("Hello world")["input_ids"]
88
+ [15496, 995]
89
+
90
+ >>> tokenizer(" Hello world")["input_ids"]
91
+ [18435, 995]
92
+ ```
93
+
94
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
95
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
96
+
97
+ <Tip>
98
+
99
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
100
+
101
+ </Tip>
102
+
103
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
104
+ this superclass for more information regarding those methods.
105
+
106
+ Args:
107
+ vocab_file (`str`):
108
+ Path to the vocabulary file.
109
+ merges_file (`str`):
110
+ Path to the merges file.
111
+ errors (`str`, *optional*, defaults to `"replace"`):
112
+ Paradigm to follow when decoding bytes to UTF-8. See
113
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
114
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
115
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
116
+ token instead.
117
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
118
+ The beginning of sequence token.
119
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
120
+ The end of sequence token.
121
+ pad_token (`str`, *optional*):
122
+ The token used for padding, for example when batching sequences of different lengths.
123
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
124
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
125
+ other word. (The GPT2 tokenizer detects the beginning of words by the preceding space.)
126
+ add_bos_token (`bool`, *optional*, defaults to `False`):
127
+ Whether or not to add an initial beginning-of-sentence token to the input. This allows treating the leading
128
+ word just like any other word.
129
+ """
130
+
131
+ vocab_files_names = VOCAB_FILES_NAMES
132
+ model_input_names = ["input_ids", "attention_mask"]
133
+
134
+ def __init__(
135
+ self,
136
+ vocab_file,
137
+ merges_file,
138
+ errors="replace",
139
+ unk_token="<|endoftext|>",
140
+ bos_token="<|endoftext|>",
141
+ eos_token="<|endoftext|>",
142
+ pad_token=None,
143
+ add_prefix_space=False,
144
+ add_bos_token=False,
145
+ **kwargs,
146
+ ):
147
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
148
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
149
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
150
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
151
+
152
+ self.add_bos_token = add_bos_token
153
+
154
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
155
+ self.encoder = json.load(vocab_handle)
156
+ self.decoder = {v: k for k, v in self.encoder.items()}
157
+ self.errors = errors # how to handle errors in decoding
158
+ self.byte_encoder = bytes_to_unicode()
159
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
160
+ with open(merges_file, encoding="utf-8") as merges_handle:
161
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
162
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
163
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
164
+ self.cache = {}
165
+ self.add_prefix_space = add_prefix_space
166
+
167
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
168
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
169
+
170
+ super().__init__(
171
+ errors=errors,
172
+ unk_token=unk_token,
173
+ bos_token=bos_token,
174
+ eos_token=eos_token,
175
+ pad_token=pad_token,
176
+ add_prefix_space=add_prefix_space,
177
+ add_bos_token=add_bos_token,
178
+ **kwargs,
179
+ )
180
+
181
+ @property
182
+ def vocab_size(self):
183
+ return len(self.encoder)
184
+
185
+ def get_vocab(self):
186
+ return dict(self.encoder, **self.added_tokens_encoder)
187
+
188
+ def bpe(self, token):
189
+ if token in self.cache:
190
+ return self.cache[token]
191
+ word = tuple(token)
192
+ pairs = get_pairs(word)
193
+
194
+ if not pairs:
195
+ return token
196
+
197
+ while True:
198
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
199
+ if bigram not in self.bpe_ranks:
200
+ break
201
+ first, second = bigram
202
+ new_word = []
203
+ i = 0
204
+ while i < len(word):
205
+ try:
206
+ j = word.index(first, i)
207
+ except ValueError:
208
+ new_word.extend(word[i:])
209
+ break
210
+ else:
211
+ new_word.extend(word[i:j])
212
+ i = j
213
+
214
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
215
+ new_word.append(first + second)
216
+ i += 2
217
+ else:
218
+ new_word.append(word[i])
219
+ i += 1
220
+ new_word = tuple(new_word)
221
+ word = new_word
222
+ if len(word) == 1:
223
+ break
224
+ else:
225
+ pairs = get_pairs(word)
226
+ word = " ".join(word)
227
+ self.cache[token] = word
228
+ return word
229
+
230
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
231
+ if self.add_bos_token:
232
+ bos_token_ids = [self.bos_token_id]
233
+ else:
234
+ bos_token_ids = []
235
+
236
+ output = bos_token_ids + token_ids_0
237
+
238
+ if token_ids_1 is None:
239
+ return output
240
+
241
+ return output + bos_token_ids + token_ids_1
242
+
243
+ def get_special_tokens_mask(
244
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
245
+ ) -> List[int]:
246
+ """
247
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
248
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
249
+
250
+ Args:
251
+ token_ids_0 (`List[int]`):
252
+ List of IDs.
253
+ token_ids_1 (`List[int]`, *optional*):
254
+ Optional second list of IDs for sequence pairs.
255
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
256
+ Whether or not the token list is already formatted with special tokens for the model.
257
+
258
+ Returns:
259
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
260
+ """
261
+ if already_has_special_tokens:
262
+ return super().get_special_tokens_mask(
263
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
264
+ )
265
+
266
+ if not self.add_bos_token:
267
+ return super().get_special_tokens_mask(
268
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
269
+ )
270
+
271
+ if token_ids_1 is None:
272
+ return [1] + ([0] * len(token_ids_0))
273
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
274
+
275
+ def _tokenize(self, text):
276
+ """Tokenize a string."""
277
+ bpe_tokens = []
278
+ for token in re.findall(self.pat, text):
279
+ token = "".join(
280
+ self.byte_encoder[b] for b in token.encode("utf-8")
281
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
282
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
283
+ return bpe_tokens
284
+
285
+ def _convert_token_to_id(self, token):
286
+ """Converts a token (str) in an id using the vocab."""
287
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
288
+
289
+ def _convert_id_to_token(self, index):
290
+ """Converts an index (integer) in a token (str) using the vocab."""
291
+ return self.decoder.get(index)
292
+
293
+ def convert_tokens_to_string(self, tokens):
294
+ """Converts a sequence of tokens (string) in a single string."""
295
+ text = "".join(tokens)
296
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
297
+ return text
298
+
299
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
300
+ if not os.path.isdir(save_directory):
301
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
302
+ return
303
+ vocab_file = os.path.join(
304
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
305
+ )
306
+ merge_file = os.path.join(
307
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
308
+ )
309
+
310
+ with open(vocab_file, "w", encoding="utf-8") as f:
311
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
312
+
313
+ index = 0
314
+ with open(merge_file, "w", encoding="utf-8") as writer:
315
+ writer.write("#version: 0.2\n")
316
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
317
+ if index != token_index:
318
+ logger.warning(
319
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
320
+ " Please check that the tokenizer is not corrupted!"
321
+ )
322
+ index = token_index
323
+ writer.write(" ".join(bpe_tokens) + "\n")
324
+ index += 1
325
+
326
+ return vocab_file, merge_file
327
+
328
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
329
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
330
+ if is_split_into_words or add_prefix_space:
331
+ text = " " + text
332
+ return (text, kwargs)
333
+
334
+ @property
335
+ def default_chat_template(self):
336
+ """
337
+ A simple chat template that ignores role information and just concatenates messages with EOS tokens.
338
+ """
339
+ logger.warning_once(
340
+ "\nNo chat template is defined for this tokenizer - using the default template "
341
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
342
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
343
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
344
+ )
345
+ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_fast.py ADDED
@@ -0,0 +1,156 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for OpenAI GPT."""
16
+
17
+
18
+ import json
19
+ from typing import Optional, Tuple
20
+
21
+ from tokenizers import pre_tokenizers
22
+
23
+ from ...tokenization_utils_base import BatchEncoding
24
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
25
+ from ...utils import logging
26
+ from .tokenization_gpt2 import GPT2Tokenizer
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
32
+
33
+
34
+ class GPT2TokenizerFast(PreTrainedTokenizerFast):
35
+ """
36
+ Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
37
+ Byte-Pair-Encoding.
38
+
39
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
40
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
41
+
42
+ ```python
43
+ >>> from transformers import GPT2TokenizerFast
44
+
45
+ >>> tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")
46
+ >>> tokenizer("Hello world")["input_ids"]
47
+ [15496, 995]
48
+
49
+ >>> tokenizer(" Hello world")["input_ids"]
50
+ [18435, 995]
51
+ ```
52
+
53
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
54
+ the model was not pretrained this way, it might yield a decrease in performance.
55
+
56
+ <Tip>
57
+
58
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
59
+
60
+ </Tip>
61
+
62
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
63
+ refer to this superclass for more information regarding those methods.
64
+
65
+ Args:
66
+ vocab_file (`str`, *optional*):
67
+ Path to the vocabulary file.
68
+ merges_file (`str`, *optional*):
69
+ Path to the merges file.
70
+ tokenizer_file (`str`, *optional*):
71
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
72
+ contains everything needed to load the tokenizer.
73
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
74
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
75
+ token instead.
76
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
77
+ The beginning of sequence token.
78
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
79
+ The end of sequence token.
80
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
81
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like
82
+ any other word. (The GPT2 tokenizer detects the beginning of words by the preceding space.)
83
+ """
84
+
85
+ vocab_files_names = VOCAB_FILES_NAMES
86
+ model_input_names = ["input_ids", "attention_mask"]
87
+ slow_tokenizer_class = GPT2Tokenizer
88
+
89
+ def __init__(
90
+ self,
91
+ vocab_file=None,
92
+ merges_file=None,
93
+ tokenizer_file=None,
94
+ unk_token="<|endoftext|>",
95
+ bos_token="<|endoftext|>",
96
+ eos_token="<|endoftext|>",
97
+ add_prefix_space=False,
98
+ **kwargs,
99
+ ):
100
+ super().__init__(
101
+ vocab_file,
102
+ merges_file,
103
+ tokenizer_file=tokenizer_file,
104
+ unk_token=unk_token,
105
+ bos_token=bos_token,
106
+ eos_token=eos_token,
107
+ add_prefix_space=add_prefix_space,
108
+ **kwargs,
109
+ )
110
+
111
+ self.add_bos_token = kwargs.pop("add_bos_token", False)
112
+
113
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
114
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
115
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
116
+ pre_tok_state["add_prefix_space"] = add_prefix_space
117
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
118
+
119
+ self.add_prefix_space = add_prefix_space
120
+
121
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
122
+ is_split_into_words = kwargs.get("is_split_into_words", False)
123
+ assert self.add_prefix_space or not is_split_into_words, (
124
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
125
+ "to use it with pretokenized inputs."
126
+ )
127
+
128
+ return super()._batch_encode_plus(*args, **kwargs)
129
+
130
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
131
+ is_split_into_words = kwargs.get("is_split_into_words", False)
132
+
133
+ assert self.add_prefix_space or not is_split_into_words, (
134
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
135
+ "to use it with pretokenized inputs."
136
+ )
137
+
138
+ return super()._encode_plus(*args, **kwargs)
139
+
140
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
141
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
142
+ return tuple(files)
143
+
144
+ @property
145
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template
146
+ def default_chat_template(self):
147
+ """
148
+ A simple chat template that ignores role information and just concatenates messages with EOS tokens.
149
+ """
150
+ logger.warning_once(
151
+ "\nNo chat template is defined for this tokenizer - using the default template "
152
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
153
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
154
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
155
+ )
156
+ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_tf.py ADDED
@@ -0,0 +1,104 @@
1
+ import os
2
+ from typing import Dict, List, Union
3
+
4
+ import tensorflow as tf
5
+ from keras_nlp.tokenizers import BytePairTokenizer
6
+ from tensorflow_text import pad_model_inputs
7
+
8
+ from ...modeling_tf_utils import keras
9
+ from .tokenization_gpt2 import GPT2Tokenizer
10
+
11
+
12
+ class TFGPT2Tokenizer(keras.layers.Layer):
13
+ """
14
+ This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the
15
+ `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings
16
+ from an existing standard tokenizer object.
17
+
18
+ In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run
19
+ when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options
20
+ than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes
21
+ straight from `tf.string` inputs to outputs.
22
+
23
+ Args:
24
+ vocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer
25
+ merges (List[str]): Merges list for Byte Pair Tokenizer
26
+ """
27
+
28
+ def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
29
+ super().__init__()
30
+ self.pad_token_id = pad_token_id
31
+ self.max_length = max_length
32
+ self.vocab = vocab
33
+ self.merges = merges
34
+ self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
35
+
36
+ @classmethod
37
+ def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
38
+ """Creates TFGPT2Tokenizer from GPT2Tokenizer
39
+
40
+ Args:
41
+ tokenizer (GPT2Tokenizer): An existing slow GPT2 tokenizer whose vocabulary and merge list are reused.
42
+
43
+ Examples:
44
+
45
+ ```python
46
+ from transformers import AutoTokenizer, TFGPT2Tokenizer
47
+
48
+ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
49
+ tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer)
50
+ ```
51
+ """
52
+ merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
53
+ vocab = tokenizer.get_vocab()
54
+ return cls(vocab, merges, *args, **kwargs)
55
+
56
+ @classmethod
57
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
58
+ """Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer
59
+
60
+ Args:
61
+ pretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model
62
+
63
+ Examples:
64
+
65
+ ```python
66
+ from transformers import TFGPT2Tokenizer
67
+
68
+ tf_tokenizer = TFGPT2Tokenizer.from_pretrained("openai-community/gpt2")
69
+ ```
70
+ """
71
+ tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
72
+ return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
73
+
74
+ @classmethod
75
+ def from_config(cls, config):
76
+ """Creates TFGPT2Tokenizer from configurations
77
+
78
+ Args:
79
+ config (Dict): Dictionary with keys such as stated in `get_config`.
80
+ """
81
+ return cls(**config)
82
+
83
+ def get_config(self):
84
+ return {
85
+ "vocab": self.vocab,
86
+ "merges": self.merges,
87
+ "max_length": self.max_length,
88
+ "pad_token_id": self.pad_token_id,
89
+ }
90
+
91
+ def call(self, x, max_length: int = None):
92
+ input_ids = self.tf_tokenizer(x)
93
+ attention_mask = tf.ones_like(input_ids)
94
+
95
+ if self.pad_token_id is not None:
96
+ # pad the tokens up to max length
97
+ max_length = max_length if max_length is not None else self.max_length
98
+
99
+ if max_length is not None:
100
+ input_ids, attention_mask = pad_model_inputs(
101
+ input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
102
+ )
103
+
104
+ return {"attention_mask": attention_mask, "input_ids": input_ids}
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__init__.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
17
+ from ...utils import OptionalDependencyNotAvailable
18
+
19
+
20
+ _import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
21
+
22
+ try:
23
+ if not is_tokenizers_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_gpt_neox"] = [
37
+ "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "GPTNeoXForCausalLM",
39
+ "GPTNeoXForQuestionAnswering",
40
+ "GPTNeoXForSequenceClassification",
41
+ "GPTNeoXForTokenClassification",
42
+ "GPTNeoXLayer",
43
+ "GPTNeoXModel",
44
+ "GPTNeoXPreTrainedModel",
45
+ ]
46
+
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
50
+
51
+ try:
52
+ if not is_tokenizers_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
58
+
59
+ try:
60
+ if not is_torch_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ from .modeling_gpt_neox import (
66
+ GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
67
+ GPTNeoXForCausalLM,
68
+ GPTNeoXForQuestionAnswering,
69
+ GPTNeoXForSequenceClassification,
70
+ GPTNeoXForTokenClassification,
71
+ GPTNeoXLayer,
72
+ GPTNeoXModel,
73
+ GPTNeoXPreTrainedModel,
74
+ )
75
+
76
+
77
+ else:
78
+ import sys
79
+
80
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
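The `_import_structure` / `TYPE_CHECKING` split above is the usual lazy-import pattern: importing the package only registers a `_LazyModule`, and the heavier submodules (with their optional torch / tokenizers dependencies) are loaded on first attribute access. A small sketch, assuming `transformers` is installed:

```python
# Sketch: importing the package is cheap; the configuration submodule is only
# loaded when GPTNeoXConfig is first accessed through the _LazyModule.
import transformers.models.gpt_neox as gpt_neox

config = gpt_neox.GPTNeoXConfig()  # first attribute access loads configuration_gpt_neox
print(config.hidden_size)          # 6144, the gpt-neox-20b style default
```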
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.32 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/configuration_gpt_neox.cpython-310.pyc ADDED
Binary file (7.63 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/modeling_gpt_neox.cpython-310.pyc ADDED
Binary file (41.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/__pycache__/tokenization_gpt_neox_fast.cpython-310.pyc ADDED
Binary file (8.46 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/configuration_gpt_neox.py ADDED
@@ -0,0 +1,179 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPTNeoX model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GPTNeoXConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an
30
+ GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the GPTNeoX
32
+ [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50432):
40
+ Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the
41
+ `input_ids` passed when calling [`GPTNeoXModel`].
42
+ hidden_size (`int`, *optional*, defaults to 6144):
43
+ Dimension of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 44):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 64):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 24576):
49
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
53
+ rotary_pct (`float`, *optional*, defaults to 0.25):
54
+ Percentage of hidden dimensions to allocate to rotary embeddings.
56
+ rotary_emb_base (`int`, *optional*, defaults to 10000):
57
+ Base for computing the rotary embedding frequencies.
57
+ attention_dropout (`float`, *optional*, defaults to 0.0):
58
+ The dropout probability for the attention scores.
59
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio of (1) the word embeddings, (2) the post-attention hidden states, and (3) the post-mlp
61
+ hidden states.
62
+ classifier_dropout (`float`, *optional*, defaults to 0.1):
63
+ Argument used when doing token classification, used in the model [`GPTNeoXForTokenClassification`].
64
+
65
+ The dropout ratio for the hidden layer.
66
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
72
+ The epsilon used by the layer normalization layers.
73
+ use_cache (`bool`, *optional*, defaults to `True`):
74
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
75
+ relevant if `config.is_decoder=True`.
76
+ use_parallel_residual (`bool`, *optional*, defaults to `True`):
77
+ Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training
78
+ speedup at large scales (e.g. 20B).
79
+ rope_scaling (`Dict`, *optional*):
80
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
81
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
82
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
83
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
84
+ these scaling strategies behave:
85
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
86
+ experimental feature, subject to breaking API changes in future versions.
87
+ attention_bias (`bool`, *optional*, defaults to `True`):
88
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
89
+
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import GPTNeoXConfig, GPTNeoXModel
94
+
95
+ >>> # Initializing a GPTNeoX gpt-neox-20b style configuration
96
+ >>> configuration = GPTNeoXConfig()
97
+
98
+ >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration
99
+ >>> model = GPTNeoXModel(configuration) # doctest: +SKIP
100
+
101
+ >>> # Accessing the model configuration
102
+ >>> configuration = model.config # doctest: +SKIP
103
+ ```"""
104
+
105
+ model_type = "gpt_neox"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=50432,
111
+ hidden_size=6144,
112
+ num_hidden_layers=44,
113
+ num_attention_heads=64,
114
+ intermediate_size=24576,
115
+ hidden_act="gelu",
116
+ rotary_pct=0.25,
117
+ rotary_emb_base=10000,
118
+ attention_dropout=0.0,
119
+ hidden_dropout=0.0,
120
+ classifier_dropout=0.1,
121
+ max_position_embeddings=2048,
122
+ initializer_range=0.02,
123
+ layer_norm_eps=1e-5,
124
+ use_cache=True,
125
+ bos_token_id=0,
126
+ eos_token_id=2,
127
+ tie_word_embeddings=False,
128
+ use_parallel_residual=True,
129
+ rope_scaling=None,
130
+ attention_bias=True,
131
+ **kwargs,
132
+ ):
133
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
134
+ self.vocab_size = vocab_size
135
+ self.max_position_embeddings = max_position_embeddings
136
+ self.hidden_size = hidden_size
137
+ self.num_hidden_layers = num_hidden_layers
138
+ self.num_attention_heads = num_attention_heads
139
+ self.intermediate_size = intermediate_size
140
+ self.hidden_act = hidden_act
141
+ self.rotary_pct = rotary_pct
142
+ self.rotary_emb_base = rotary_emb_base
143
+ self.attention_dropout = attention_dropout
144
+ self.hidden_dropout = hidden_dropout
145
+ self.classifier_dropout = classifier_dropout
146
+ self.initializer_range = initializer_range
147
+ self.layer_norm_eps = layer_norm_eps
148
+ self.use_cache = use_cache
149
+ self.tie_word_embeddings = tie_word_embeddings
150
+ self.use_parallel_residual = use_parallel_residual
151
+ self.rope_scaling = rope_scaling
152
+ self.attention_bias = attention_bias
153
+ self._rope_scaling_validation()
154
+
155
+ if self.hidden_size % self.num_attention_heads != 0:
156
+ raise ValueError(
157
+ "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
158
+ )
159
+
160
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
161
+ def _rope_scaling_validation(self):
162
+ """
163
+ Validate the `rope_scaling` configuration.
164
+ """
165
+ if self.rope_scaling is None:
166
+ return
167
+
168
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
169
+ raise ValueError(
170
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
171
+ )
172
+ rope_scaling_type = self.rope_scaling.get("type", None)
173
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
174
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
175
+ raise ValueError(
176
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
177
+ )
178
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
179
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py ADDED
@@ -0,0 +1,1426 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPTNeoX model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+ from torch.nn import functional as F
24
+
25
+ from ...activations import ACT2FN
26
+ from ...file_utils import (
27
+ add_code_sample_docstrings,
28
+ add_start_docstrings,
29
+ add_start_docstrings_to_model_forward,
30
+ replace_return_docstrings,
31
+ )
32
+ from ...modeling_outputs import (
33
+ BaseModelOutputWithPast,
34
+ CausalLMOutputWithPast,
35
+ QuestionAnsweringModelOutput,
36
+ SequenceClassifierOutputWithPast,
37
+ TokenClassifierOutput,
38
+ )
39
+ from ...modeling_utils import PreTrainedModel
40
+ from ...utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging
41
+ from .configuration_gpt_neox import GPTNeoXConfig
42
+
43
+
44
+ if is_flash_attn_2_available():
45
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
46
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM"
52
+ _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neox-20b"
53
+ _CONFIG_FOR_DOC = "GPTNeoXConfig"
54
+
55
+
56
+ from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
60
+ def _get_unpad_data(attention_mask):
61
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
62
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
63
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
64
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
65
+ return (
66
+ indices,
67
+ cu_seqlens,
68
+ max_seqlen_in_batch,
69
+ )
70
+
71
+
72
+ class GPTNeoXPreTrainedModel(PreTrainedModel):
73
+ """
74
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
75
+ models.
76
+ """
77
+
78
+ config_class = GPTNeoXConfig
79
+ base_model_prefix = "gpt_neox"
80
+ supports_gradient_checkpointing = True
81
+ _no_split_modules = ["GPTNeoXLayer"]
82
+ _skip_keys_device_placement = "past_key_values"
83
+ _supports_flash_attn_2 = True
84
+
85
+ def _init_weights(self, module):
86
+ """Initialize the weights"""
87
+ if isinstance(module, nn.Linear):
88
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
89
+ if module.bias is not None:
90
+ module.bias.data.zero_()
91
+ elif isinstance(module, nn.Embedding):
92
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
93
+ if module.padding_idx is not None:
94
+ module.weight.data[module.padding_idx].zero_()
95
+ elif isinstance(module, nn.LayerNorm):
96
+ module.bias.data.zero_()
97
+ module.weight.data.fill_(1.0)
98
+
99
+
100
+ class GPTNeoXAttention(nn.Module):
101
+ def __init__(self, config):
102
+ super().__init__()
103
+ self.config = config
104
+ self.num_attention_heads = config.num_attention_heads
105
+ self.hidden_size = config.hidden_size
106
+ if self.hidden_size % self.num_attention_heads != 0:
107
+ raise ValueError(
108
+ "The hidden size is not divisble by the number of attention heads! Make sure to update them"
109
+ )
110
+ self.head_size = self.hidden_size // self.num_attention_heads
111
+ self.rotary_ndims = int(self.head_size * config.rotary_pct)
112
+ self._init_bias(config.max_position_embeddings)
113
+
114
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
115
+ self._init_rope()
116
+
117
+ self.norm_factor = self.head_size**-0.5
118
+ self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias)
119
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
120
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
121
+ self.is_causal = True
122
+
123
+ def _init_bias(self, max_positions, device=None):
124
+ self.register_buffer(
125
+ "bias",
126
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
127
+ 1, 1, max_positions, max_positions
128
+ ),
129
+ persistent=False,
130
+ )
131
+ if device is not None:
132
+ self.bias = self.bias.to(device)
133
+
134
+ def _init_rope(self):
135
+ if self.config.rope_scaling is None:
136
+ self.rotary_emb = GPTNeoXRotaryEmbedding(
137
+ self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base
138
+ )
139
+ else:
140
+ scaling_type = self.config.rope_scaling["type"]
141
+ scaling_factor = self.config.rope_scaling["factor"]
142
+ if scaling_type == "linear":
143
+ self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding(
144
+ self.rotary_ndims,
145
+ self.config.max_position_embeddings,
146
+ base=self.config.rotary_emb_base,
147
+ scaling_factor=scaling_factor,
148
+ )
149
+ elif scaling_type == "dynamic":
150
+ self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding(
151
+ self.rotary_ndims,
152
+ self.config.max_position_embeddings,
153
+ base=self.config.rotary_emb_base,
154
+ scaling_factor=scaling_factor,
155
+ )
156
+ else:
157
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
158
+
159
+ def forward(
160
+ self,
161
+ hidden_states: torch.FloatTensor,
162
+ attention_mask: torch.FloatTensor,
163
+ position_ids: torch.LongTensor,
164
+ head_mask: Optional[torch.FloatTensor] = None,
165
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
166
+ use_cache: Optional[bool] = False,
167
+ output_attentions: Optional[bool] = False,
168
+ padding_mask: Optional[torch.Tensor] = None,
169
+ ):
170
+ has_layer_past = layer_past is not None
171
+
172
+ # Compute QKV
173
+ # Attention heads [batch, seq_len, hidden_size]
174
+ # --> [batch, seq_len, (np * 3 * head_size)]
175
+ qkv = self.query_key_value(hidden_states)
176
+
177
+ # [batch, seq_len, (num_heads * 3 * head_size)]
178
+ # --> [batch, seq_len, num_heads, 3 * head_size]
179
+ new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
180
+ qkv = qkv.view(*new_qkv_shape)
181
+
182
+ # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
183
+ query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
184
+ key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
185
+ value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)
186
+
187
+ # Compute rotary embeddings on rotary_ndims
188
+ query_rot = query[..., : self.rotary_ndims]
189
+ query_pass = query[..., self.rotary_ndims :]
190
+ key_rot = key[..., : self.rotary_ndims]
191
+ key_pass = key[..., self.rotary_ndims :]
192
+
193
+ # Compute token offset for rotary embeddings (when decoding)
194
+ seq_len = key.shape[-2]
195
+ if has_layer_past:
196
+ seq_len += layer_past[0].shape[-2]
197
+ cos, sin = self.rotary_emb(value, seq_len=seq_len)
198
+ query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
199
+ query = torch.cat((query, query_pass), dim=-1)
200
+ key = torch.cat((key, key_pass), dim=-1)
201
+
202
+ # Cache QKV values
203
+ if has_layer_past:
204
+ past_key = layer_past[0]
205
+ past_value = layer_past[1]
206
+ key = torch.cat((past_key, key), dim=-2)
207
+ value = torch.cat((past_value, value), dim=-2)
208
+ present = (key, value) if use_cache else None
209
+
210
+ # Compute attention
211
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
212
+
213
+ # Reshape outputs
214
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)
215
+ attn_output = self.dense(attn_output)
216
+
217
+ outputs = (attn_output, present)
218
+ if output_attentions:
219
+ outputs += (attn_weights,)
220
+
221
+ return outputs
222
+
223
+ @classmethod
224
+ def _split_heads(cls, tensor, num_attention_heads, attn_head_size):
225
+ """
226
+ Splits hidden dim into attn_head_size and num_attention_heads
227
+ """
228
+ # tensor: [bs, seq_len, hidden_size]
229
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
230
+ # -> [bs, seq_len, num_attention_heads, attn_head_size]
231
+ tensor = tensor.view(new_shape)
232
+ # -> [bs, num_attention_heads, seq_len, attn_head_size]
233
+ tensor = tensor.permute(0, 2, 1, 3)
234
+ return tensor
235
+
236
+ @classmethod
237
+ def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):
238
+ """
239
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
240
+ """
241
+ # tensor [bs, num_attention_heads, seq_len, attn_head_size]
242
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
243
+ # -> [bs, seq_len, num_attention_heads, attn_head_size]
244
+ tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)
245
+ # -> [bs, seq_len, hidden_size]
246
+ return tensor
247
+
248
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
249
+ # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]
250
+ # compute causal mask from causal mask buffer
251
+ batch_size, num_attention_heads, query_length, attn_head_size = query.size()
252
+ key_length = key.size(-2)
253
+
254
+ # dynamically increase the causal mask with the key length, if needed.
255
+ if key_length > self.bias.shape[-1]:
256
+ self._init_bias(key_length, device=key.device)
257
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
258
+
259
+ query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)
260
+ key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)
261
+ attn_scores = torch.zeros(
262
+ batch_size * num_attention_heads,
263
+ query_length,
264
+ key_length,
265
+ dtype=query.dtype,
266
+ device=key.device,
267
+ )
268
+ attn_scores = torch.baddbmm(
269
+ attn_scores,
270
+ query,
271
+ key.transpose(1, 2),
272
+ beta=1.0,
273
+ alpha=self.norm_factor,
274
+ )
275
+ attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)
276
+
277
+ mask_value = torch.finfo(attn_scores.dtype).min
278
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
279
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
280
+ mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)
281
+ attn_scores = torch.where(causal_mask, attn_scores, mask_value)
282
+
283
+ if attention_mask is not None:
284
+ # Apply the attention mask
285
+ attn_scores = attn_scores + attention_mask
286
+
287
+ attn_weights = nn.functional.softmax(attn_scores, dim=-1)
288
+ attn_weights = attn_weights.to(value.dtype)
289
+
290
+ # Mask heads if we want to
291
+ if head_mask is not None:
292
+ attn_weights = attn_weights * head_mask
293
+
294
+ attn_weights = self.attention_dropout(attn_weights)
295
+
296
+ attn_output = torch.matmul(attn_weights, value)
297
+ return attn_output, attn_weights
298
+
299
+
300
+ class GPTNeoXFlashAttention2(GPTNeoXAttention):
301
+ """
302
+ GPTNeoX flash attention module. This module inherits from `GPTNeoXAttention` as the weights of the module stays
303
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
304
+ flash attention and deal with padding tokens in case the input contains any of them.
305
+ """
306
+
307
+ def __init__(self, *args, **kwargs):
308
+ super().__init__(*args, **kwargs)
309
+
310
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
311
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
312
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
313
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
314
+
315
+ def forward(
316
+ self,
317
+ hidden_states: torch.FloatTensor,
318
+ attention_mask: torch.FloatTensor,
319
+ position_ids: torch.LongTensor,
320
+ head_mask: Optional[torch.FloatTensor] = None,
321
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
322
+ use_cache: Optional[bool] = False,
323
+ output_attentions: Optional[bool] = False,
324
+ ):
325
+ has_layer_past = layer_past is not None
326
+
327
+ # Compute QKV
328
+ # Attention heads [batch, seq_len, hidden_size]
329
+ # --> [batch, seq_len, (np * 3 * head_size)]
330
+ qkv = self.query_key_value(hidden_states)
331
+
332
+ # [batch, seq_len, (num_heads * 3 * head_size)]
333
+ # --> [batch, seq_len, num_heads, 3 * head_size]
334
+ new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
335
+ qkv = qkv.view(*new_qkv_shape)
336
+
337
+ # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
338
+ query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
339
+ key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
340
+ value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)
341
+
342
+ query_length = query.shape[-2]
343
+
344
+ # Compute rotary embeddings on rotary_ndims
345
+ query_rot = query[..., : self.rotary_ndims]
346
+ query_pass = query[..., self.rotary_ndims :]
347
+ key_rot = key[..., : self.rotary_ndims]
348
+ key_pass = key[..., self.rotary_ndims :]
349
+
350
+ # Compute token offset for rotary embeddings (when decoding)
351
+ seq_len = key.shape[-2]
352
+ if has_layer_past:
353
+ seq_len += layer_past[0].shape[-2]
354
+ cos, sin = self.rotary_emb(value, seq_len=seq_len)
355
+ query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
356
+ query = torch.cat((query, query_pass), dim=-1)
357
+ key = torch.cat((key, key_pass), dim=-1)
358
+
359
+ # Cache QKV values
360
+ if has_layer_past:
361
+ past_key = layer_past[0]
362
+ past_value = layer_past[1]
363
+ key = torch.cat((past_key, key), dim=-2)
364
+ value = torch.cat((past_value, value), dim=-2)
365
+ present = (key, value) if use_cache else None
366
+
367
+ # GPT-neo-X casts query and key in fp32 to apply rotary embedding in full precision
368
+ target_dtype = value.dtype
369
+ if query.dtype != target_dtype:
370
+ query = query.to(target_dtype)
371
+ if key.dtype != target_dtype:
372
+ key = key.to(target_dtype)
373
+
374
+ # Permute to get the expected shape for Flash Attention
375
+ query = query.permute(0, 2, 1, 3)
376
+ key = key.permute(0, 2, 1, 3)
377
+ value = value.permute(0, 2, 1, 3)
378
+
379
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
380
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
381
+ # cast them back in float16 / bfloat16 just to be sure everything works as expected.
382
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
383
+ input_dtype = query.dtype
384
+ if input_dtype == torch.float32:
385
+ if torch.is_autocast_enabled():
386
+ target_dtype = torch.get_autocast_gpu_dtype()
387
+ # Handle the case where the model is quantized
388
+ elif hasattr(self.config, "_pre_quantization_dtype"):
389
+ target_dtype = self.config._pre_quantization_dtype
390
+ else:
391
+ target_dtype = self.query_key_value.weight.dtype
392
+
393
+ logger.warning_once(
394
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
395
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
396
+ f" {target_dtype}."
397
+ )
398
+
399
+ query = query.to(target_dtype)
400
+ key = key.to(target_dtype)
401
+ value = value.to(target_dtype)
402
+
403
+ attention_dropout = self.config.attention_dropout if self.training else 0.0
404
+
405
+ # Compute attention
406
+ attn_weights = self._flash_attention_forward(
407
+ query, key, value, attention_mask, query_length, dropout=attention_dropout, softmax_scale=self.norm_factor
408
+ )
409
+
410
+ # Reshape outputs
411
+ attn_output = attn_weights.reshape(
412
+ attn_weights.shape[0], attn_weights.shape[1], self.num_attention_heads * self.head_size
413
+ )
414
+ attn_output = self.dense(attn_output)
415
+
416
+ outputs = (attn_output, present)
417
+ if output_attentions:
418
+ outputs += (attn_weights,)
419
+
420
+ return outputs
421
+
422
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
423
+ def _flash_attention_forward(
424
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
425
+ ):
426
+ """
427
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
428
+ first unpad the input, then computes the attention scores and pad the final attention scores.
429
+
430
+ Args:
431
+ query_states (`torch.Tensor`):
432
+ Input query states to be passed to Flash Attention API
433
+ key_states (`torch.Tensor`):
434
+ Input key states to be passed to Flash Attention API
435
+ value_states (`torch.Tensor`):
436
+ Input value states to be passed to Flash Attention API
437
+ attention_mask (`torch.Tensor`):
438
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
439
+ position of padding tokens and 1 for the position of non-padding tokens.
440
+ dropout (`float`):
441
+ Attention dropout
442
+ softmax_scale (`float`, *optional*):
443
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
444
+ """
445
+ if not self._flash_attn_uses_top_left_mask:
446
+ causal = self.is_causal
447
+ else:
448
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
449
+ causal = self.is_causal and query_length != 1
450
+
451
+ # Contains at least one padding token in the sequence
452
+ if attention_mask is not None:
453
+ batch_size = query_states.shape[0]
454
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
455
+ query_states, key_states, value_states, attention_mask, query_length
456
+ )
457
+
458
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
459
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
460
+
461
+ attn_output_unpad = flash_attn_varlen_func(
462
+ query_states,
463
+ key_states,
464
+ value_states,
465
+ cu_seqlens_q=cu_seqlens_q,
466
+ cu_seqlens_k=cu_seqlens_k,
467
+ max_seqlen_q=max_seqlen_in_batch_q,
468
+ max_seqlen_k=max_seqlen_in_batch_k,
469
+ dropout_p=dropout,
470
+ softmax_scale=softmax_scale,
471
+ causal=causal,
472
+ )
473
+
474
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
475
+ else:
476
+ attn_output = flash_attn_func(
477
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
478
+ )
479
+
480
+ return attn_output
481
+
482
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads
483
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
484
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
485
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
486
+
487
+ key_layer = index_first_axis(
488
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
489
+ )
490
+ value_layer = index_first_axis(
491
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
492
+ )
493
+ if query_length == kv_seq_len:
494
+ query_layer = index_first_axis(
495
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k
496
+ )
497
+ cu_seqlens_q = cu_seqlens_k
498
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
499
+ indices_q = indices_k
500
+ elif query_length == 1:
501
+ max_seqlen_in_batch_q = 1
502
+ cu_seqlens_q = torch.arange(
503
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
504
+ ) # There is a memcpy here, that is very bad.
505
+ indices_q = cu_seqlens_q[:-1]
506
+ query_layer = query_layer.squeeze(1)
507
+ else:
508
+ # The -q_len: slice assumes left padding.
509
+ attention_mask = attention_mask[:, -query_length:]
510
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
511
+
512
+ return (
513
+ query_layer,
514
+ key_layer,
515
+ value_layer,
516
+ indices_q,
517
+ (cu_seqlens_q, cu_seqlens_k),
518
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
519
+ )
520
+
521
+
522
+ def attention_mask_func(attention_scores, ltor_mask):
523
+ attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min)
524
+ return attention_scores
525
+
526
+
527
+ class GPTNeoXRotaryEmbedding(nn.Module):
528
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding.__init__
529
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
530
+ super().__init__()
531
+
532
+ self.dim = dim
533
+ self.max_position_embeddings = max_position_embeddings
534
+ self.base = base
535
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
536
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
537
+
538
+ # Build here to make `torch.jit.trace` work.
539
+ self._set_cos_sin_cache(
540
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
541
+ )
542
+
543
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
544
+ self.max_seq_len_cached = seq_len
545
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
546
+
547
+ freqs = torch.outer(t, self.inv_freq)
548
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
549
+ emb = torch.cat((freqs, freqs), dim=-1)
550
+ self.register_buffer("cos_cached", emb.cos(), persistent=False)
551
+ self.register_buffer("sin_cached", emb.sin(), persistent=False)
552
+
553
+ def forward(self, x, seq_len=None):
554
+ # x: [bs, num_attention_heads, seq_len, head_size]
555
+ if seq_len > self.max_seq_len_cached:
556
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
557
+
558
+ return (
559
+ self.cos_cached[:seq_len],
560
+ self.sin_cached[:seq_len],
561
+ )
562
+
563
+
564
+ # copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding.__init__
565
+ # TODO @gante bring compatibility back
566
+ class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding):
567
+ """GPTNeoXRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
568
+
569
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
570
+ self.scaling_factor = scaling_factor
571
+ super().__init__(dim, max_position_embeddings, base, device)
572
+
573
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
574
+ self.max_seq_len_cached = seq_len
575
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
576
+ t = t / self.scaling_factor
577
+
578
+ freqs = torch.outer(t, self.inv_freq)
579
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
580
+ emb = torch.cat((freqs, freqs), dim=-1)
581
+ self.register_buffer("cos_cached", emb.cos(), persistent=False)
582
+ self.register_buffer("sin_cached", emb.sin(), persistent=False)
583
+
584
+
585
+ class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding):
586
+ """GPTNeoXRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
587
+
588
+ # copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding.__init__
589
+ # TODO @gante no longer copied from
590
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
591
+ self.scaling_factor = scaling_factor
592
+ super().__init__(dim, max_position_embeddings, base, device)
593
+
594
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
595
+ self.max_seq_len_cached = seq_len
596
+
597
+ if seq_len > self.max_position_embeddings:
598
+ base = self.base * (
599
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
600
+ ) ** (self.dim / (self.dim - 2))
601
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
602
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
603
+
604
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
605
+
606
+ freqs = torch.outer(t, self.inv_freq)
607
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
608
+ emb = torch.cat((freqs, freqs), dim=-1)
609
+ self.register_buffer("cos_cached", emb.cos(), persistent=False)
610
+ self.register_buffer("sin_cached", emb.sin(), persistent=False)
611
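As a quick numeric illustration of the base rescaling above (all numbers are arbitrary, not tied to a real checkpoint): when the requested sequence length is twice `max_position_embeddings`, the rotary base is multiplied by roughly `2 ** (dim / (dim - 2))`, which lowers the rotary frequencies and stretches the usable context.

```python
# Illustrative numbers only.
base, dim = 10000, 64
max_position_embeddings, scaling_factor = 2048, 1.0
seq_len = 4096  # twice the training context

new_base = base * (
    (scaling_factor * seq_len / max_position_embeddings) - (scaling_factor - 1)
) ** (dim / (dim - 2))
print(round(new_base))  # ~20452
```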
+
612
+
613
+ def rotate_half(x):
614
+ """Rotates half the hidden dims of the input."""
615
+ x1 = x[..., : x.shape[-1] // 2]
616
+ x2 = x[..., x.shape[-1] // 2 :]
617
+ return torch.cat((-x2, x1), dim=-1)
618
+
619
+
620
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
621
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
622
+ """Applies Rotary Position Embedding to the query and key tensors.
623
+
624
+ Args:
625
+ q (`torch.Tensor`): The query tensor.
626
+ k (`torch.Tensor`): The key tensor.
627
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
628
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
629
+ position_ids (`torch.Tensor`):
630
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
631
+ used to pass offset position ids when working with a KV-cache.
632
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
633
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
634
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
635
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
636
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
637
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
638
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
639
+ Returns:
640
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
641
+ """
642
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
643
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
644
+ q_embed = (q * cos) + (rotate_half(q) * sin)
645
+ k_embed = (k * cos) + (rotate_half(k) * sin)
646
+ return q_embed, k_embed
647
+
648
+
649
+ class GPTNeoXMLP(nn.Module):
650
+ def __init__(self, config):
651
+ super().__init__()
652
+ self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
653
+ self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
654
+ self.act = ACT2FN[config.hidden_act]
655
+
656
+ def forward(self, hidden_states):
657
+ hidden_states = self.dense_h_to_4h(hidden_states)
658
+ hidden_states = self.act(hidden_states)
659
+ hidden_states = self.dense_4h_to_h(hidden_states)
660
+ return hidden_states
661
+
662
+
663
+ GPT_NEOX_ATTENTION_CLASSES = {
664
+ "eager": GPTNeoXAttention,
665
+ "flash_attention_2": GPTNeoXFlashAttention2,
666
+ }
667
+
668
+
669
+ class GPTNeoXLayer(nn.Module):
670
+ def __init__(self, config):
671
+ super().__init__()
672
+ self.use_parallel_residual = config.use_parallel_residual
673
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
674
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
675
+ self.post_attention_dropout = nn.Dropout(config.hidden_dropout)
676
+ self.post_mlp_dropout = nn.Dropout(config.hidden_dropout)
677
+ self.attention = GPT_NEOX_ATTENTION_CLASSES[config._attn_implementation](config)
678
+ self.mlp = GPTNeoXMLP(config)
679
+
680
+ def forward(
681
+ self,
682
+ hidden_states: Optional[torch.FloatTensor],
683
+ attention_mask: Optional[torch.FloatTensor] = None,
684
+ position_ids: Optional[torch.LongTensor] = None,
685
+ head_mask: Optional[torch.FloatTensor] = None,
686
+ use_cache: Optional[bool] = False,
687
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
688
+ output_attentions: Optional[bool] = False,
689
+ ):
690
+ attention_layer_outputs = self.attention(
691
+ self.input_layernorm(hidden_states),
692
+ attention_mask=attention_mask,
693
+ position_ids=position_ids,
694
+ layer_past=layer_past,
695
+ head_mask=head_mask,
696
+ use_cache=use_cache,
697
+ output_attentions=output_attentions,
698
+ )
699
+ attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights)
700
+ attn_output = self.post_attention_dropout(attn_output)
701
+ outputs = attention_layer_outputs[1:]
702
+
703
+ if self.use_parallel_residual:
704
+ # pseudocode:
705
+ # x = x + attn(ln1(x)) + mlp(ln2(x))
706
+ mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))
707
+ mlp_output = self.post_mlp_dropout(mlp_output)
708
+ hidden_states = mlp_output + attn_output + hidden_states
709
+ else:
710
+ # pseudocode:
711
+ # x = x + attn(ln1(x))
712
+ # x = x + mlp(ln2(x))
713
+ attn_output = attn_output + hidden_states
714
+ mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
715
+ mlp_output = self.post_mlp_dropout(mlp_output)
716
+ hidden_states = mlp_output + attn_output
717
+
718
+ if use_cache:
719
+ outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)
720
+ else:
721
+ outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)
722
+
723
+ return outputs
724
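The two residual layouts described in the pseudocode comments above can be summarised with placeholder functions (`attn`, `mlp`, `ln1`, `ln2` are stand-ins, not the real modules):

```python
def parallel_residual(x, attn, mlp, ln1, ln2):
    # use_parallel_residual=True: attention and MLP both read the same layer input
    return x + attn(ln1(x)) + mlp(ln2(x))


def sequential_residual(x, attn, mlp, ln1, ln2):
    # use_parallel_residual=False: the MLP sees the attention output
    x = x + attn(ln1(x))
    return x + mlp(ln2(x))
```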
+
725
+
726
+ GPT_NEOX_START_DOCSTRING = r"""
727
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
728
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
729
+ behavior.
730
+
731
+ Parameters:
732
+ config ([`~GPTNeoXConfig`]): Model configuration class with all the parameters of the model.
733
+ Initializing with a config file does not load the weights associated with the model, only the
734
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
735
+ """
736
+
737
+ GPT_NEOX_INPUTS_DOCSTRING = r"""
738
+ Args:
739
+ input_ids (`torch.LongTensor` of shape `({0})`):
740
+ Indices of input sequence tokens in the vocabulary.
741
+
742
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
743
+ [`PreTrainedTokenizer.__call__`] for details.
744
+
745
+ [What are input IDs?](../glossary#input-ids)
746
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
747
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
748
+
749
+ - 1 for tokens that are **not masked**,
750
+ - 0 for tokens that are **masked**.
751
+
752
+ [What are attention masks?](../glossary#attention-mask)
753
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
754
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
755
+ config.n_positions - 1]`.
756
+
757
+ [What are position IDs?](../glossary#position-ids)
758
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
759
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
760
+
761
+ - 1 indicates the head is **not masked**,
762
+ - 0 indicates the head is **masked**.
763
+
764
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
765
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
766
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
767
+ model's internal embedding lookup matrix.
768
+ output_attentions (`bool`, *optional*):
769
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
770
+ tensors for more detail.
771
+ output_hidden_states (`bool`, *optional*):
772
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
773
+ more detail.
774
+ return_dict (`bool`, *optional*):
775
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
776
+ """
777
+
778
+
779
+ @add_start_docstrings(
780
+ "The bare GPTNeoX Model transformer outputting raw hidden-states without any specific head on top.",
781
+ GPT_NEOX_START_DOCSTRING,
782
+ )
783
+ class GPTNeoXModel(GPTNeoXPreTrainedModel):
784
+ def __init__(self, config):
785
+ super().__init__(config)
786
+ self.config = config
787
+
788
+ self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)
789
+ self.emb_dropout = nn.Dropout(config.hidden_dropout)
790
+ self.layers = nn.ModuleList([GPTNeoXLayer(config) for _ in range(config.num_hidden_layers)])
791
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
792
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
793
+
794
+ self.gradient_checkpointing = False
795
+
796
+ # Initialize weights and apply final processing
797
+ self.post_init()
798
+
799
+ def get_input_embeddings(self):
800
+ return self.embed_in
801
+
802
+ def set_input_embeddings(self, value):
803
+ self.embed_in = value
804
+
805
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
806
+ @add_code_sample_docstrings(
807
+ checkpoint=_CHECKPOINT_FOR_DOC,
808
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
809
+ output_type=BaseModelOutputWithPast,
810
+ config_class=_CONFIG_FOR_DOC,
811
+ )
812
+ def forward(
813
+ self,
814
+ input_ids: Optional[torch.LongTensor] = None,
815
+ attention_mask: Optional[torch.FloatTensor] = None,
816
+ position_ids: Optional[torch.LongTensor] = None,
817
+ head_mask: Optional[torch.FloatTensor] = None,
818
+ inputs_embeds: Optional[torch.FloatTensor] = None,
819
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
820
+ use_cache: Optional[bool] = None,
821
+ output_attentions: Optional[bool] = None,
822
+ output_hidden_states: Optional[bool] = None,
823
+ return_dict: Optional[bool] = None,
824
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
825
+ r"""
826
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
827
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
828
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
829
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
830
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
831
+ use_cache (`bool`, *optional*):
832
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
833
+ `past_key_values`).
834
+ """
835
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
836
+ output_hidden_states = (
837
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
838
+ )
839
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
840
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
841
+
842
+ if input_ids is not None and inputs_embeds is not None:
843
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
844
+ elif input_ids is not None:
845
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
846
+ input_shape = input_ids.size()
847
+ elif inputs_embeds is not None:
848
+ input_shape = inputs_embeds.size()[:-1]
849
+ else:
850
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
851
+
852
+ batch_size, seq_length = input_shape
853
+
854
+ if past_key_values is None:
855
+ past_length = 0
856
+ past_key_values = tuple([None] * self.config.num_hidden_layers)
857
+ else:
858
+ past_length = past_key_values[0][0].size(-2)
859
+
860
+ if position_ids is None:
861
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
862
+ position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device)
863
+ position_ids = position_ids.unsqueeze(0)
864
+
865
+ # Attention mask.
866
+ if attention_mask is not None:
867
+ assert batch_size > 0, "batch_size has to be defined and > 0"
868
+ attention_mask = attention_mask.view(batch_size, -1)
869
+ if self._use_flash_attention_2:
870
+ attention_mask = attention_mask if 0 in attention_mask else None
871
+ else:
872
+ # We create a 3D attention mask from a 2D tensor mask.
873
+ # Sizes are [batch_size, 1, 1, to_seq_length]
874
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
875
+ # this attention mask is simpler than the triangular masking of causal attention
876
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
877
+ attention_mask = attention_mask[:, None, None, :]
878
+
879
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
880
+ # masked positions, this operation will create a tensor which is 0.0 for
881
+ # positions we want to attend and the dtype's smallest value for masked positions.
882
+ # Since we are adding it to the raw scores before the softmax, this is
883
+ # effectively the same as removing these entirely.
884
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
885
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
886
+
887
+ # Prepare head mask if needed
888
+ # 1.0 in head_mask indicate we keep the head
889
+ # attention_probs has shape bsz x n_heads x N x N
890
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
891
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
892
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
893
+
894
+ if inputs_embeds is None:
895
+ inputs_embeds = self.embed_in(input_ids)
896
+
897
+ hidden_states = self.emb_dropout(inputs_embeds)
898
+
899
+ if self.gradient_checkpointing and self.training:
900
+ if use_cache:
901
+ logger.warning(
902
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
903
+ )
904
+ use_cache = False
905
+
906
+ presents = () if use_cache else None
907
+ all_attentions = () if output_attentions else None
908
+ all_hidden_states = () if output_hidden_states else None
909
+ for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
910
+ if output_hidden_states:
911
+ all_hidden_states = all_hidden_states + (hidden_states,)
912
+
913
+ if self.gradient_checkpointing and self.training:
914
+ outputs = self._gradient_checkpointing_func(
915
+ layer.__call__,
916
+ hidden_states,
917
+ attention_mask,
918
+ position_ids,
919
+ head_mask[i],
920
+ use_cache,
921
+ None,
922
+ output_attentions,
923
+ )
924
+ else:
925
+ outputs = layer(
926
+ hidden_states,
927
+ attention_mask=attention_mask,
928
+ position_ids=position_ids,
929
+ head_mask=head_mask[i],
930
+ layer_past=layer_past,
931
+ use_cache=use_cache,
932
+ output_attentions=output_attentions,
933
+ )
934
+ hidden_states = outputs[0]
935
+ if use_cache is True:
936
+ presents = presents + (outputs[1],)
937
+ if output_attentions:
938
+ all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
939
+
940
+ hidden_states = self.final_layer_norm(hidden_states)
941
+ # Add last hidden state
942
+ if output_hidden_states:
943
+ all_hidden_states = all_hidden_states + (hidden_states,)
944
+
945
+ if not return_dict:
946
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
947
+
948
+ return BaseModelOutputWithPast(
949
+ last_hidden_state=hidden_states,
950
+ past_key_values=presents,
951
+ hidden_states=all_hidden_states,
952
+ attentions=all_attentions,
953
+ )
954
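As a toy illustration of the additive attention mask built in `GPTNeoXModel.forward` above for the non-flash path (values are illustrative):

```python
import torch

attention_mask = torch.tensor([[1, 1, 0]])                    # [batch, seq], 0 = padding
mask = attention_mask[:, None, None, :].to(torch.float32)     # [batch, 1, 1, seq] for broadcasting
additive = (1.0 - mask) * torch.finfo(torch.float32).min      # 0.0 where attended, a huge negative where masked
print(additive)  # roughly [[[[0., 0., -3.4028e+38]]]]
```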
+
955
+
956
+ @add_start_docstrings(
957
+ """GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.""", GPT_NEOX_START_DOCSTRING
958
+ )
959
+ class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):
960
+ _tied_weights_keys = ["embed_out.weight"]
961
+
962
+ def __init__(self, config):
963
+ super().__init__(config)
964
+
965
+ self.gpt_neox = GPTNeoXModel(config)
966
+ self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
967
+
968
+ # Initialize weights and apply final processing
969
+ self.post_init()
970
+
971
+ def get_output_embeddings(self):
972
+ return self.embed_out
973
+
974
+ def set_output_embeddings(self, new_embeddings):
975
+ self.embed_out = new_embeddings
976
+
977
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
978
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
979
+ def forward(
980
+ self,
981
+ input_ids: Optional[torch.LongTensor] = None,
982
+ attention_mask: Optional[torch.FloatTensor] = None,
983
+ position_ids: Optional[torch.LongTensor] = None,
984
+ inputs_embeds: Optional[torch.FloatTensor] = None,
985
+ head_mask: Optional[torch.FloatTensor] = None,
986
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
987
+ labels: Optional[torch.LongTensor] = None,
988
+ use_cache: Optional[bool] = None,
989
+ output_attentions: Optional[bool] = None,
990
+ output_hidden_states: Optional[bool] = None,
991
+ return_dict: Optional[bool] = None,
992
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
993
+ r"""
994
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
995
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
996
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
997
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are
998
+ only required when the model is used as a decoder in a Sequence to Sequence model.
999
+
1000
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
1001
+ `past_key_values` input) to speed up sequential decoding.
1002
+
1003
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1004
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1005
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1006
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1007
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1008
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1009
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1010
+ use_cache (`bool`, *optional*):
1011
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1012
+ `past_key_values`).
1013
+
1014
+ Returns:
1015
+
1016
+ Example:
1017
+
1018
+ ```python
1019
+ >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig
1020
+ >>> import torch
1021
+
1022
+ >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
1023
+ >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b")
1024
+ >>> config.is_decoder = True
1025
+ >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config)
1026
+
1027
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1028
+ >>> outputs = model(**inputs)
1029
+
1030
+ >>> prediction_logits = outputs.logits
1031
+ ```"""
1032
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1033
+
1034
+ outputs = self.gpt_neox(
1035
+ input_ids,
1036
+ attention_mask=attention_mask,
1037
+ position_ids=position_ids,
1038
+ head_mask=head_mask,
1039
+ inputs_embeds=inputs_embeds,
1040
+ past_key_values=past_key_values,
1041
+ use_cache=use_cache,
1042
+ output_attentions=output_attentions,
1043
+ output_hidden_states=output_hidden_states,
1044
+ return_dict=return_dict,
1045
+ )
1046
+
1047
+ hidden_states = outputs[0]
1048
+ lm_logits = self.embed_out(hidden_states)
1049
+
1050
+ lm_loss = None
1051
+ if labels is not None:
1052
+ # move labels to correct device to enable model parallelism
1053
+ labels = labels.to(lm_logits.device)
1054
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1055
+ shift_logits = lm_logits[:, :-1, :].contiguous()
1056
+ labels = labels[:, 1:].contiguous()
1057
+ loss_fct = CrossEntropyLoss()
1058
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))
1059
+
1060
+ if not return_dict:
1061
+ output = (lm_logits,) + outputs[1:]
1062
+ return ((lm_loss,) + output) if lm_loss is not None else output
1063
+
1064
+ return CausalLMOutputWithPast(
1065
+ loss=lm_loss,
1066
+ logits=lm_logits,
1067
+ past_key_values=outputs.past_key_values,
1068
+ hidden_states=outputs.hidden_states,
1069
+ attentions=outputs.attentions,
1070
+ )
1071
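The label shift used for the CLM loss above can be checked on toy tensors (sizes and values are arbitrary):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
lm_logits = torch.randn(2, 5, vocab_size)            # [batch, seq_len, vocab]
labels = torch.randint(0, vocab_size, (2, 5))        # typically the input ids themselves

shift_logits = lm_logits[:, :-1, :].contiguous()     # position t predicts token t + 1
shift_labels = labels[:, 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())
```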
+
1072
+ def prepare_inputs_for_generation(
1073
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1074
+ ):
1075
+ input_shape = input_ids.shape
1076
+ # cut decoder_input_ids if past is used
1077
+ if past_key_values is not None:
1078
+ past_length = past_key_values[0][0].shape[2]
1079
+
1080
+ # Some generation methods already pass only the last input ID
1081
+ if input_ids.shape[1] > past_length:
1082
+ remove_prefix_length = past_length
1083
+ else:
1084
+ # Default to old behavior: keep only final ID
1085
+ remove_prefix_length = input_ids.shape[1] - 1
1086
+
1087
+ input_ids = input_ids[:, remove_prefix_length:]
1088
+
1089
+ position_ids = kwargs.get("position_ids", None)
1090
+ if attention_mask is not None and position_ids is None:
1091
+ # create position_ids on the fly for batch generation
1092
+ position_ids = attention_mask.long().cumsum(-1) - 1
1093
+ position_ids.masked_fill_(attention_mask == 0, 1)
1094
+ if past_key_values:
1095
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1096
+
1097
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1098
+ if attention_mask is None:
1099
+ attention_mask = input_ids.new_ones(input_shape)
1100
+
1101
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1102
+ if inputs_embeds is not None and past_key_values is None:
1103
+ model_inputs = {"inputs_embeds": inputs_embeds}
1104
+ else:
1105
+ model_inputs = {"input_ids": input_ids}
1106
+ model_inputs.update(
1107
+ {
1108
+ "attention_mask": attention_mask,
1109
+ "past_key_values": past_key_values,
1110
+ "position_ids": position_ids,
1111
+ }
1112
+ )
1113
+
1114
+ return model_inputs
1115
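The on-the-fly `position_ids` computed above for batched (left-padded) generation behave like this on a toy mask:

```python
import torch

attention_mask = torch.tensor([[0, 1, 1],      # left-padded sequence
                               [1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)  # tensor([[1, 0, 1], [0, 1, 2]]); padded slots get a dummy value of 1
```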
+
1116
+ def _reorder_cache(self, past_key_values, beam_idx):
1117
+ reordered_past = ()
1118
+ for layer_past in past_key_values:
1119
+ reordered_past += (
1120
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1121
+ + layer_past[2:],
1122
+ )
1123
+ return reordered_past
1124
+
1125
+
1126
+ @add_start_docstrings(
1127
+ """
1128
+ The GPTNeoX Model transformer with a sequence classification head on top (linear layer).
1129
+
1130
+ [`GPTNeoXForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1131
+ (e.g. GPT-1) do.
1132
+
1133
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1134
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1135
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1136
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1137
+ each row of the batch).
1138
+ """,
1139
+ GPT_NEOX_START_DOCSTRING,
1140
+ )
1141
+ class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel):
1142
+ def __init__(self, config):
1143
+ super().__init__(config)
1144
+ self.num_labels = config.num_labels
1145
+ self.gpt_neox = GPTNeoXModel(config)
1146
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1147
+
1148
+ # Initialize weights and apply final processing
1149
+ self.post_init()
1150
+
1151
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
1152
+ @add_code_sample_docstrings(
1153
+ checkpoint=_CHECKPOINT_FOR_DOC,
1154
+ output_type=SequenceClassifierOutputWithPast,
1155
+ config_class=_CONFIG_FOR_DOC,
1156
+ )
1157
+ def forward(
1158
+ self,
1159
+ input_ids: Optional[torch.LongTensor] = None,
1160
+ attention_mask: Optional[torch.FloatTensor] = None,
1161
+ position_ids: Optional[torch.LongTensor] = None,
1162
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1163
+ head_mask: Optional[torch.FloatTensor] = None,
1164
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1165
+ labels: Optional[torch.LongTensor] = None,
1166
+ use_cache: Optional[bool] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
1171
+ r"""
1172
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1173
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1174
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1175
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1176
+ """
1177
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1178
+
1179
+ outputs = self.gpt_neox(
1180
+ input_ids,
1181
+ attention_mask=attention_mask,
1182
+ position_ids=position_ids,
1183
+ head_mask=head_mask,
1184
+ inputs_embeds=inputs_embeds,
1185
+ past_key_values=past_key_values,
1186
+ use_cache=use_cache,
1187
+ output_attentions=output_attentions,
1188
+ output_hidden_states=output_hidden_states,
1189
+ return_dict=return_dict,
1190
+ )
1191
+ hidden_states = outputs[0]
1192
+ logits = self.score(hidden_states)
1193
+
1194
+ if input_ids is not None:
1195
+ batch_size, sequence_length = input_ids.shape[:2]
1196
+ else:
1197
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1198
+
1199
+ if self.config.pad_token_id is None and batch_size != 1:
1200
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1201
+ if self.config.pad_token_id is None:
1202
+ sequence_lengths = -1
1203
+ else:
1204
+ if input_ids is not None:
1205
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1206
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1207
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1208
+ sequence_lengths = sequence_lengths.to(logits.device)
1209
+ else:
1210
+ sequence_lengths = -1
1211
+ logger.warning(
1212
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1213
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1214
+ )
1215
+
1216
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1217
+
1218
+ loss = None
1219
+ if labels is not None:
1220
+ labels = labels.to(logits.device)
1221
+ if self.config.problem_type is None:
1222
+ if self.num_labels == 1:
1223
+ self.config.problem_type = "regression"
1224
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1225
+ self.config.problem_type = "single_label_classification"
1226
+ else:
1227
+ self.config.problem_type = "multi_label_classification"
1228
+
1229
+ if self.config.problem_type == "regression":
1230
+ loss_fct = MSELoss()
1231
+ if self.num_labels == 1:
1232
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1233
+ else:
1234
+ loss = loss_fct(pooled_logits, labels)
1235
+ elif self.config.problem_type == "single_label_classification":
1236
+ loss_fct = CrossEntropyLoss()
1237
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1238
+ elif self.config.problem_type == "multi_label_classification":
1239
+ loss_fct = BCEWithLogitsLoss()
1240
+ loss = loss_fct(pooled_logits, labels)
1241
+ if not return_dict:
1242
+ output = (pooled_logits,) + outputs[1:]
1243
+ return ((loss,) + output) if loss is not None else output
1244
+
1245
+ return SequenceClassifierOutputWithPast(
1246
+ loss=loss,
1247
+ logits=pooled_logits,
1248
+ past_key_values=outputs.past_key_values,
1249
+ hidden_states=outputs.hidden_states,
1250
+ attentions=outputs.attentions,
1251
+ )
1252
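The pooling index computed above (last non-padding token per row, with a modulo fallback for rows without padding) can be checked on a toy batch; `pad_token_id` is assumed to be 0 here purely for illustration.

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],     # 3 real tokens -> last real index 2
                          [8, 9, 1, 2, 3]])    # no padding    -> last index 4
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]   # argmax is 0 when no pad, so -1 wraps to 4
print(sequence_lengths)  # tensor([2, 4])
```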
+
1253
+
1254
+ class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel):
1255
+ def __init__(self, config):
1256
+ super().__init__(config)
1257
+ self.num_labels = config.num_labels
1258
+
1259
+ self.gpt_neox = GPTNeoXModel(config)
1260
+ self.dropout = nn.Dropout(config.classifier_dropout)
1261
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1262
+
1263
+ # Initialize weights and apply final processing
1264
+ self.post_init()
1265
+
1266
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
1267
+ @add_code_sample_docstrings(
1268
+ checkpoint="LarsJonasson/pythia-410m-deduped-sft-swedish",
1269
+ output_type=TokenClassifierOutput,
1270
+ config_class=_CONFIG_FOR_DOC,
1271
+ expected_loss=0.25,
1272
+ )
1273
+ def forward(
1274
+ self,
1275
+ input_ids: Optional[torch.LongTensor] = None,
1276
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1277
+ attention_mask: Optional[torch.FloatTensor] = None,
1278
+ token_type_ids: Optional[torch.LongTensor] = None,
1279
+ position_ids: Optional[torch.LongTensor] = None,
1280
+ head_mask: Optional[torch.FloatTensor] = None,
1281
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1282
+ labels: Optional[torch.LongTensor] = None,
1283
+ use_cache: Optional[bool] = None,
1284
+ output_attentions: Optional[bool] = None,
1285
+ output_hidden_states: Optional[bool] = None,
1286
+ return_dict: Optional[bool] = None,
1287
+ ) -> Union[Tuple, TokenClassifierOutput]:
1288
+ r"""
1289
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1290
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1291
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1292
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1293
+ """
1294
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1295
+
1296
+ outputs = self.gpt_neox(
1297
+ input_ids,
1298
+ past_key_values=past_key_values,
1299
+ attention_mask=attention_mask,
1300
+ position_ids=position_ids,
1301
+ head_mask=head_mask,
1302
+ inputs_embeds=inputs_embeds,
1303
+ use_cache=use_cache,
1304
+ output_attentions=output_attentions,
1305
+ output_hidden_states=output_hidden_states,
1306
+ return_dict=return_dict,
1307
+ )
1308
+
1309
+ hidden_states = outputs[0]
1310
+ hidden_states = self.dropout(hidden_states)
1311
+ logits = self.classifier(hidden_states)
1312
+
1313
+ loss = None
1314
+ if labels is not None:
1315
+ labels = labels.to(logits.device)
1316
+ loss_fct = CrossEntropyLoss()
1317
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1318
+
1319
+ if not return_dict:
1320
+ output = (logits,) + outputs[2:]
1321
+ return ((loss,) + output) if loss is not None else output
1322
+
1323
+ return TokenClassifierOutput(
1324
+ loss=loss,
1325
+ logits=logits,
1326
+ hidden_states=outputs.hidden_states,
1327
+ attentions=outputs.attentions,
1328
+ )
1329
+
1330
+
1331
+ @add_start_docstrings(
1332
+ """
1333
+ The GPT-NeoX Model transformer with a span classification head on top for extractive question-answering tasks like
1334
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1335
+ """,
1336
+ GPT_NEOX_START_DOCSTRING,
1337
+ )
1338
+ class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel):
1339
+ def __init__(self, config):
1340
+ super().__init__(config)
1341
+ self.num_labels = config.num_labels
1342
+ self.gpt_neox = GPTNeoXModel(config)
1343
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1344
+
1345
+ # Initialize weights and apply final processing
1346
+ self.post_init()
1347
+
1348
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1349
+ @add_code_sample_docstrings(
1350
+ checkpoint=_CHECKPOINT_FOR_DOC,
1351
+ output_type=QuestionAnsweringModelOutput,
1352
+ config_class=_CONFIG_FOR_DOC,
1353
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1354
+ )
1355
+ def forward(
1356
+ self,
1357
+ input_ids: Optional[torch.LongTensor] = None,
1358
+ attention_mask: Optional[torch.FloatTensor] = None,
1359
+ token_type_ids: Optional[torch.LongTensor] = None,
1360
+ position_ids: Optional[torch.LongTensor] = None,
1361
+ head_mask: Optional[torch.FloatTensor] = None,
1362
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1363
+ start_positions: Optional[torch.LongTensor] = None,
1364
+ end_positions: Optional[torch.LongTensor] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ return_dict: Optional[bool] = None,
1368
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1369
+ r"""
1370
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1371
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1372
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1373
+ are not taken into account for computing the loss.
1374
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1377
+ are not taken into account for computing the loss.
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ outputs = self.gpt_neox(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ position_ids=position_ids,
1385
+ head_mask=head_mask,
1386
+ inputs_embeds=inputs_embeds,
1387
+ output_attentions=output_attentions,
1388
+ output_hidden_states=output_hidden_states,
1389
+ return_dict=return_dict,
1390
+ )
1391
+
1392
+ sequence_output = outputs[0]
1393
+
1394
+ logits = self.qa_outputs(sequence_output)
1395
+ start_logits, end_logits = logits.split(1, dim=-1)
1396
+ start_logits = start_logits.squeeze(-1).contiguous()
1397
+ end_logits = end_logits.squeeze(-1).contiguous()
1398
+
1399
+ total_loss = None
1400
+ if start_positions is not None and end_positions is not None:
1401
+ # If we are on multi-GPU, splitting adds a dimension
1402
+ if len(start_positions.size()) > 1:
1403
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1404
+ if len(end_positions.size()) > 1:
1405
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1406
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1407
+ ignored_index = start_logits.size(1)
1408
+ start_positions = start_positions.clamp(0, ignored_index)
1409
+ end_positions = end_positions.clamp(0, ignored_index)
1410
+
1411
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1412
+ start_loss = loss_fct(start_logits, start_positions)
1413
+ end_loss = loss_fct(end_logits, end_positions)
1414
+ total_loss = (start_loss + end_loss) / 2
1415
+
1416
+ if not return_dict:
1417
+ output = (start_logits, end_logits) + outputs[2:]
1418
+ return ((total_loss,) + output) if total_loss is not None else output
1419
+
1420
+ return QuestionAnsweringModelOutput(
1421
+ loss=total_loss,
1422
+ start_logits=start_logits,
1423
+ end_logits=end_logits,
1424
+ hidden_states=outputs.hidden_states,
1425
+ attentions=outputs.attentions,
1426
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py ADDED
@@ -0,0 +1,243 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for GPTNeoX."""
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import pre_tokenizers, processors
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
28
+
29
+
30
+ class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
31
+ """
32
+ Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
33
+ Byte-Pair-Encoding.
34
+
35
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
36
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
37
+
38
+ ```python
39
+ >>> from transformers import GPTNeoXTokenizerFast
40
+
41
+ >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("openai-community/gpt2")
42
+ >>> tokenizer("Hello world")["input_ids"]
43
+ [15496, 995]
44
+
45
+ >>> tokenizer(" Hello world")["input_ids"]
46
+ [18435, 995]
47
+ ```
48
+
49
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
50
+ the model was not pretrained this way, it might yield a decrease in performance.
51
+
52
+ <Tip>
53
+
54
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
55
+
56
+ </Tip>
57
+
58
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
59
+ refer to this superclass for more information regarding those methods.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ Path to the vocabulary file.
64
+ merges_file (`str`):
65
+ Path to the merges file.
66
+ errors (`str`, *optional*, defaults to `"replace"`):
67
+ Paradigm to follow when decoding bytes to UTF-8. See
68
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
69
+ unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
73
+ The beginning of sequence token.
74
+ eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
75
+ The end of sequence token.
76
+ pad_token (`str`, *optional*):
77
+ Token for padding a sequence.
78
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
79
+ Whether or not to add an initial space to the input. This allows treating the leading word just as any
80
+ other word (the GPTNeoX tokenizer detects the beginning of words by the preceding space).
81
+ add_bos_token (`bool`, *optional*, defaults to `False`):
82
+ Whether or not to add a `bos_token` at the start of sequences.
83
+ add_eos_token (`bool`, *optional*, defaults to `False`):
84
+ Whether or not to add an `eos_token` at the end of sequences.
85
+ trim_offsets (`bool`, *optional*, defaults to `True`):
86
+ Whether or not the post-processing step should trim offsets to avoid including whitespaces.
87
+ """
88
+
89
+ vocab_files_names = VOCAB_FILES_NAMES
90
+ model_input_names = ["input_ids", "attention_mask"]
91
+
92
+ def __init__(
93
+ self,
94
+ vocab_file=None,
95
+ merges_file=None,
96
+ tokenizer_file=None,
97
+ unk_token="<|endoftext|>",
98
+ bos_token="<|endoftext|>",
99
+ eos_token="<|endoftext|>",
100
+ pad_token=None,
101
+ add_bos_token=False,
102
+ add_eos_token=False,
103
+ add_prefix_space=False,
104
+ **kwargs,
105
+ ):
106
+ super().__init__(
107
+ vocab_file,
108
+ merges_file,
109
+ tokenizer_file=tokenizer_file,
110
+ unk_token=unk_token,
111
+ bos_token=bos_token,
112
+ eos_token=eos_token,
113
+ pad_token=pad_token,
114
+ add_bos_token=add_bos_token,
115
+ add_eos_token=add_eos_token,
116
+ add_prefix_space=add_prefix_space,
117
+ **kwargs,
118
+ )
119
+
120
+ self._add_bos_token = add_bos_token
121
+ self._add_eos_token = add_eos_token
122
+ self.update_post_processor()
123
+
124
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
125
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
126
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
127
+ pre_tok_state["add_prefix_space"] = add_prefix_space
128
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
129
+
130
+ self.add_prefix_space = add_prefix_space
131
+
132
+ @property
133
+ def add_eos_token(self):
134
+ return self._add_eos_token
135
+
136
+ @property
137
+ def add_bos_token(self):
138
+ return self._add_bos_token
139
+
140
+ @add_eos_token.setter
141
+ def add_eos_token(self, value):
142
+ self._add_eos_token = value
143
+ self.update_post_processor()
144
+
145
+ @add_bos_token.setter
146
+ def add_bos_token(self, value):
147
+ self._add_bos_token = value
148
+ self.update_post_processor()
149
+
150
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
151
+ def update_post_processor(self):
152
+ """
153
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
154
+ """
155
+ bos = self.bos_token
156
+ bos_token_id = self.bos_token_id
157
+ if bos is None and self.add_bos_token:
158
+ raise ValueError("add_bos_token = True but bos_token = None")
159
+
160
+ eos = self.eos_token
161
+ eos_token_id = self.eos_token_id
162
+ if eos is None and self.add_eos_token:
163
+ raise ValueError("add_eos_token = True but eos_token = None")
164
+
165
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
166
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
167
+
168
+ special_tokens = []
169
+ if self.add_bos_token:
170
+ special_tokens.append((bos, bos_token_id))
171
+ if self.add_eos_token:
172
+ special_tokens.append((eos, eos_token_id))
173
+ self._tokenizer.post_processor = processors.TemplateProcessing(
174
+ single=single, pair=pair, special_tokens=special_tokens
175
+ )
176
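For reference, with `add_bos_token=True`, `add_eos_token=True` and the default `<|endoftext|>` token for both, the template strings built above come out as follows (a plain-Python rendering, shown only for illustration):

```python
bos = eos = "<|endoftext|>"
single = f"{bos}:0 $A:0 {eos}:0"
pair = f"{single} {bos}:1 $B:1 {eos}:1"
print(single)  # <|endoftext|>:0 $A:0 <|endoftext|>:0
print(pair)    # <|endoftext|>:0 $A:0 <|endoftext|>:0 <|endoftext|>:1 $B:1 <|endoftext|>:1
```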
+
177
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask
178
+ def get_special_tokens_mask(
179
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
180
+ ) -> List[int]:
181
+ """
182
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
183
+ special tokens using the tokenizer `prepare_for_model` method.
184
+
185
+ Args:
186
+ token_ids_0 (`List[int]`):
187
+ List of IDs.
188
+ token_ids_1 (`List[int]`, *optional*):
189
+ Optional second list of IDs for sequence pairs.
190
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
191
+ Whether or not the token list is already formatted with special tokens for the model.
192
+
193
+ Returns:
194
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
195
+ """
196
+ if already_has_special_tokens:
197
+ return super().get_special_tokens_mask(
198
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
199
+ )
200
+
201
+ bos_token_id = [1] if self.add_bos_token else []
202
+ eos_token_id = [1] if self.add_eos_token else []
203
+
204
+ if token_ids_1 is None:
205
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
206
+ return (
207
+ bos_token_id
208
+ + ([0] * len(token_ids_0))
209
+ + eos_token_id
210
+ + bos_token_id
211
+ + ([0] * len(token_ids_1))
212
+ + eos_token_id
213
+ )
214
+
215
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens
216
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
217
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
218
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
219
+
220
+ output = bos_token_id + token_ids_0 + eos_token_id
221
+
222
+ if token_ids_1 is not None:
223
+ output = output + bos_token_id + token_ids_1 + eos_token_id
224
+
225
+ return output
226
+
227
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
228
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
229
+ return tuple(files)
230
+
231
+ @property
232
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template
233
+ def default_chat_template(self):
234
+ """
235
+ A simple chat template that ignores role information and just concatenates messages with EOS tokens.
236
+ """
237
+ logger.warning_once(
238
+ "\nNo chat template is defined for this tokenizer - using the default template "
239
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
240
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
241
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
242
+ )
243
+ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py ADDED
@@ -0,0 +1,67 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
28
+
29
+ try:
30
+ if not is_sentencepiece_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
36
+
37
+ try:
38
+ if not is_tokenizers_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
44
+
45
+ if TYPE_CHECKING:
46
+ from .processing_layoutxlm import LayoutXLMProcessor
47
+
48
+ try:
49
+ if not is_sentencepiece_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
55
+
56
+ try:
57
+ if not is_tokenizers_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
63
+
64
+ else:
65
+ import sys
66
+
67
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc ADDED
Binary file (7.27 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc ADDED
Binary file (39 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc ADDED
Binary file (27 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py ADDED
@@ -0,0 +1,200 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for LayoutXLM.
17
+ """
18
+ import warnings
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class LayoutXLMProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a LayoutXLM processor which combines a LayoutXLM image processor and a LayoutXLM tokenizer into a single
29
+ processor.
30
+
31
+ [`LayoutXLMProcessor`] offers all the functionalities you need to prepare data for the model.
32
+
33
+ It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
34
+ get words and normalized bounding boxes. These are then provided to [`LayoutXLMTokenizer`] or
35
+ [`LayoutXLMTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
36
+ `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
37
+ into token-level `labels` for token classification tasks (such as FUNSD, CORD).
38
+
39
+ Args:
40
+ image_processor (`LayoutLMv2ImageProcessor`, *optional*):
41
+ An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
42
+ tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
43
+ An instance of [`LayoutXLMTokenizer`] or [`LayoutXLMTokenizerFast`]. The tokenizer is a required input.
44
+ """
45
+
46
+ attributes = ["image_processor", "tokenizer"]
47
+ image_processor_class = "LayoutLMv2ImageProcessor"
48
+ tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
49
+
50
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
51
+ if "feature_extractor" in kwargs:
52
+ warnings.warn(
53
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
54
+ " instead.",
55
+ FutureWarning,
56
+ )
57
+ feature_extractor = kwargs.pop("feature_extractor")
58
+
59
+ image_processor = image_processor if image_processor is not None else feature_extractor
60
+ if image_processor is None:
61
+ raise ValueError("You need to specify an `image_processor`.")
62
+ if tokenizer is None:
63
+ raise ValueError("You need to specify a `tokenizer`.")
64
+
65
+ super().__init__(image_processor, tokenizer)
66
+
67
+ def __call__(
68
+ self,
69
+ images,
70
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
71
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
72
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
73
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
74
+ add_special_tokens: bool = True,
75
+ padding: Union[bool, str, PaddingStrategy] = False,
76
+ truncation: Union[bool, str, TruncationStrategy] = None,
77
+ max_length: Optional[int] = None,
78
+ stride: int = 0,
79
+ pad_to_multiple_of: Optional[int] = None,
80
+ return_token_type_ids: Optional[bool] = None,
81
+ return_attention_mask: Optional[bool] = None,
82
+ return_overflowing_tokens: bool = False,
83
+ return_special_tokens_mask: bool = False,
84
+ return_offsets_mapping: bool = False,
85
+ return_length: bool = False,
86
+ verbose: bool = True,
87
+ return_tensors: Optional[Union[str, TensorType]] = None,
88
+ **kwargs,
89
+ ) -> BatchEncoding:
90
+ """
91
+ This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
92
+ [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
93
+ bounding boxes along with the additional arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output,
94
+ together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
95
+ `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
96
+ arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output, together with resized `images`.
97
+
98
+ Please refer to the docstring of the above two methods for more information.
99
+ """
100
+ # verify input
101
+ if self.image_processor.apply_ocr and (boxes is not None):
102
+ raise ValueError(
103
+ "You cannot provide bounding boxes "
104
+ "if you initialized the image processor with apply_ocr set to True."
105
+ )
106
+
107
+ if self.image_processor.apply_ocr and (word_labels is not None):
108
+ raise ValueError(
109
+ "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
110
+ )
111
+
112
+ if return_overflowing_tokens is True and return_offsets_mapping is False:
113
+ raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
114
+
115
+ # first, apply the image processor
116
+ features = self.image_processor(images=images, return_tensors=return_tensors)
117
+
118
+ # second, apply the tokenizer
119
+ if text is not None and self.image_processor.apply_ocr and text_pair is None:
120
+ if isinstance(text, str):
121
+ text = [text] # add batch dimension (as the image processor always adds a batch dimension)
122
+ text_pair = features["words"]
123
+
124
+ encoded_inputs = self.tokenizer(
125
+ text=text if text is not None else features["words"],
126
+ text_pair=text_pair if text_pair is not None else None,
127
+ boxes=boxes if boxes is not None else features["boxes"],
128
+ word_labels=word_labels,
129
+ add_special_tokens=add_special_tokens,
130
+ padding=padding,
131
+ truncation=truncation,
132
+ max_length=max_length,
133
+ stride=stride,
134
+ pad_to_multiple_of=pad_to_multiple_of,
135
+ return_token_type_ids=return_token_type_ids,
136
+ return_attention_mask=return_attention_mask,
137
+ return_overflowing_tokens=return_overflowing_tokens,
138
+ return_special_tokens_mask=return_special_tokens_mask,
139
+ return_offsets_mapping=return_offsets_mapping,
140
+ return_length=return_length,
141
+ verbose=verbose,
142
+ return_tensors=return_tensors,
143
+ **kwargs,
144
+ )
145
+
146
+ # add pixel values
147
+ images = features.pop("pixel_values")
148
+ if return_overflowing_tokens is True:
149
+ images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
150
+ encoded_inputs["image"] = images
151
+
152
+ return encoded_inputs
153
+
154
+ def get_overflowing_images(self, images, overflow_to_sample_mapping):
155
+ # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
156
+ images_with_overflow = []
157
+ for sample_idx in overflow_to_sample_mapping:
158
+ images_with_overflow.append(images[sample_idx])
159
+
160
+ if len(images_with_overflow) != len(overflow_to_sample_mapping):
161
+ raise ValueError(
162
+ "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
163
+ f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
164
+ )
165
+
166
+ return images_with_overflow
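A quick sketch of what `get_overflowing_images` does when `return_overflowing_tokens=True` (not part of the added file; the image values below are stand-ins for pixel arrays):

```python
# Hypothetical illustration: when one document overflows into several windows,
# `overflow_to_sample_mapping` repeats the sample index once per window, and the
# corresponding image is duplicated so every `input_ids` row keeps its image.
images = ["pixels_of_doc_0", "pixels_of_doc_1"]  # stand-ins for pixel value arrays
overflow_to_sample_mapping = [0, 0, 1]           # doc 0 was split into two windows

images_with_overflow = [images[idx] for idx in overflow_to_sample_mapping]
print(images_with_overflow)  # ['pixels_of_doc_0', 'pixels_of_doc_0', 'pixels_of_doc_1']
```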
167
+
168
+ def batch_decode(self, *args, **kwargs):
169
+ """
170
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
171
+ refer to the docstring of this method for more information.
172
+ """
173
+ return self.tokenizer.batch_decode(*args, **kwargs)
174
+
175
+ def decode(self, *args, **kwargs):
176
+ """
177
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
178
+ to the docstring of this method for more information.
179
+ """
180
+ return self.tokenizer.decode(*args, **kwargs)
181
+
182
+ @property
183
+ def model_input_names(self):
184
+ return ["input_ids", "bbox", "attention_mask", "image"]
185
+
186
+ @property
187
+ def feature_extractor_class(self):
188
+ warnings.warn(
189
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
190
+ FutureWarning,
191
+ )
192
+ return self.image_processor_class
193
+
194
+ @property
195
+ def feature_extractor(self):
196
+ warnings.warn(
197
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
198
+ FutureWarning,
199
+ )
200
+ return self.image_processor
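For orientation, here is a minimal usage sketch of the processor defined above (not part of the added file). It assumes the public `microsoft/layoutxlm-base` checkpoint, a local `document.png`, and `apply_ocr=False` so that words and boxes are supplied by the caller; adjust paths and inputs as needed.

```python
from PIL import Image

from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor, LayoutXLMTokenizerFast

# Build the two components explicitly; apply_ocr=False means we pass words/boxes ourselves.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)

image = Image.open("document.png").convert("RGB")  # hypothetical local file
words = ["Invoice", "Total:", "1,000"]
boxes = [[82, 40, 210, 68], [90, 120, 180, 150], [200, 120, 290, 150]]  # 0-1000 scale

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(encoding.keys())  # e.g. input_ids, attention_mask, bbox, image
```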
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py ADDED
@@ -0,0 +1,1170 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for LayoutXLM model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PreTokenizedInput,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
34
+ from ..xlm_roberta.tokenization_xlm_roberta import (
35
+ SPIECE_UNDERLINE,
36
+ VOCAB_FILES_NAMES,
37
+ )
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
44
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
45
+ Whether or not to encode the sequences with the special tokens relative to their model.
46
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
47
+ Activates and controls padding. Accepts the following values:
48
+
49
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
50
+ sequence is provided).
51
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
52
+ acceptable input length for the model if that argument is not provided.
53
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
54
+ lengths).
55
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
56
+ Activates and controls truncation. Accepts the following values:
57
+
58
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
59
+ to the maximum acceptable input length for the model if that argument is not provided. This will
60
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
61
+ sequences (or a batch of pairs) is provided.
62
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
63
+ maximum acceptable input length for the model if that argument is not provided. This will only
64
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
65
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
66
+ maximum acceptable input length for the model if that argument is not provided. This will only
67
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
68
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
69
+ greater than the model maximum admissible input size).
70
+ max_length (`int`, *optional*):
71
+ Controls the maximum length to use by one of the truncation/padding parameters.
72
+
73
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
74
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
75
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
76
+ stride (`int`, *optional*, defaults to 0):
77
+ If set to a number along with `max_length`, the overflowing tokens returned when
78
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
79
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
80
+ argument defines the number of overlapping tokens.
81
+ pad_to_multiple_of (`int`, *optional*):
82
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
83
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
84
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
85
+ If set, will return tensors instead of list of python integers. Acceptable values are:
86
+
87
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
88
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
89
+ - `'np'`: Return Numpy `np.ndarray` objects.
90
+ return_token_type_ids (`bool`, *optional*):
91
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
92
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
93
+
94
+ [What are token type IDs?](../glossary#token-type-ids)
95
+ return_attention_mask (`bool`, *optional*):
96
+ Whether to return the attention mask. If left to the default, will return the attention mask according
97
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
98
+
99
+ [What are attention masks?](../glossary#attention-mask)
100
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
101
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
102
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
103
+ of returning overflowing tokens.
104
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
105
+ Whether or not to return special tokens mask information.
106
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
107
+ Whether or not to return `(char_start, char_end)` for each token.
108
+
109
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
110
+ Python's tokenizer, this method will raise `NotImplementedError`.
111
+ return_length (`bool`, *optional*, defaults to `False`):
112
+ Whether or not to return the lengths of the encoded inputs.
113
+ verbose (`bool`, *optional*, defaults to `True`):
114
+ Whether or not to print more information and warnings.
115
+ **kwargs: passed to the `self.tokenize()` method
116
+
117
+ Return:
118
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
119
+
120
+ - **input_ids** -- List of token ids to be fed to a model.
121
+
122
+ [What are input IDs?](../glossary#input-ids)
123
+
124
+ - **bbox** -- List of bounding boxes to be fed to a model.
125
+
126
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
127
+ if *"token_type_ids"* is in `self.model_input_names`).
128
+
129
+ [What are token type IDs?](../glossary#token-type-ids)
130
+
131
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
132
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
133
+
134
+ [What are attention masks?](../glossary#attention-mask)
135
+
136
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
137
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
138
+ `return_overflowing_tokens=True`).
139
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
140
+ `return_overflowing_tokens=True`).
141
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
142
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
143
+ - **length** -- The length of the inputs (when `return_length=True`).
144
+ """
145
+
146
+
147
+ class LayoutXLMTokenizer(PreTrainedTokenizer):
148
+ """
149
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
150
+ [SentencePiece](https://github.com/google/sentencepiece).
151
+
152
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
153
+ this superclass for more information regarding those methods.
154
+
155
+ Args:
156
+ vocab_file (`str`):
157
+ Path to the vocabulary file.
158
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
159
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
160
+
161
+ <Tip>
162
+
163
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
164
+ sequence. The token used is the `cls_token`.
165
+
166
+ </Tip>
167
+
168
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
169
+ The end of sequence token.
170
+
171
+ <Tip>
172
+
173
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
174
+ The token used is the `sep_token`.
175
+
176
+ </Tip>
177
+
178
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
179
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
180
+ sequence classification or for a text and a question for question answering. It is also used as the last
181
+ token of a sequence built with special tokens.
182
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
183
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
184
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
185
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
186
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
187
+ token instead.
188
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
189
+ The token used for padding, for example when batching sequences of different lengths.
190
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
191
+ The token used for masking values. This is the token used when training this model with masked language
192
+ modeling. This is the token which the model will try to predict.
193
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
194
+ The bounding box to use for the special [CLS] token.
195
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
196
+ The bounding box to use for the special [SEP] token.
197
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
198
+ The bounding box to use for the special [PAD] token.
199
+ pad_token_label (`int`, *optional*, defaults to -100):
200
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
201
+ CrossEntropyLoss.
202
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
203
+ Whether or not to only label the first subword, in case word labels are provided.
204
+ sp_model_kwargs (`dict`, *optional*):
205
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
206
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
207
+ to set:
208
+
209
+ - `enable_sampling`: Enable subword regularization.
210
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
211
+
212
+ - `nbest_size = {0,1}`: No sampling is performed.
213
+ - `nbest_size > 1`: samples from the nbest_size results.
214
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
215
+ using forward-filtering-and-backward-sampling algorithm.
216
+
217
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
218
+ BPE-dropout.
219
+
220
+ Attributes:
221
+ sp_model (`SentencePieceProcessor`):
222
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
223
+ """
224
+
225
+ vocab_files_names = VOCAB_FILES_NAMES
226
+ model_input_names = ["input_ids", "attention_mask"]
227
+
228
+ def __init__(
229
+ self,
230
+ vocab_file,
231
+ bos_token="<s>",
232
+ eos_token="</s>",
233
+ sep_token="</s>",
234
+ cls_token="<s>",
235
+ unk_token="<unk>",
236
+ pad_token="<pad>",
237
+ mask_token="<mask>",
238
+ cls_token_box=[0, 0, 0, 0],
239
+ sep_token_box=[1000, 1000, 1000, 1000],
240
+ pad_token_box=[0, 0, 0, 0],
241
+ pad_token_label=-100,
242
+ only_label_first_subword=True,
243
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
244
+ **kwargs,
245
+ ) -> None:
246
+ # Mask token behave like a normal word, i.e. include the space before it
247
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
248
+
249
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
250
+
251
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
252
+ self.sp_model.Load(str(vocab_file))
253
+ self.vocab_file = vocab_file
254
+
255
+ # Original fairseq vocab and spm vocab must be "aligned":
256
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
257
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
258
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
259
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
260
+
261
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
262
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
263
+
264
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
265
+ self.fairseq_offset = 1
266
+
267
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
268
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
269
+
270
+ # additional properties
271
+ self.cls_token_box = cls_token_box
272
+ self.sep_token_box = sep_token_box
273
+ self.pad_token_box = pad_token_box
274
+ self.pad_token_label = pad_token_label
275
+ self.only_label_first_subword = only_label_first_subword
276
+
277
+ super().__init__(
278
+ bos_token=bos_token,
279
+ eos_token=eos_token,
280
+ unk_token=unk_token,
281
+ sep_token=sep_token,
282
+ cls_token=cls_token,
283
+ pad_token=pad_token,
284
+ mask_token=mask_token,
285
+ cls_token_box=cls_token_box,
286
+ sep_token_box=sep_token_box,
287
+ pad_token_box=pad_token_box,
288
+ pad_token_label=pad_token_label,
289
+ only_label_first_subword=only_label_first_subword,
290
+ sp_model_kwargs=self.sp_model_kwargs,
291
+ **kwargs,
292
+ )
293
+
294
+ def __getstate__(self):
295
+ state = self.__dict__.copy()
296
+ state["sp_model"] = None
297
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
298
+ return state
299
+
300
+ def __setstate__(self, d):
301
+ self.__dict__ = d
302
+
303
+ # for backward compatibility
304
+ if not hasattr(self, "sp_model_kwargs"):
305
+ self.sp_model_kwargs = {}
306
+
307
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
308
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
309
+
310
+ def build_inputs_with_special_tokens(
311
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
312
+ ) -> List[int]:
313
+ """
314
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
315
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
316
+
317
+ - single sequence: `<s> X </s>`
318
+ - pair of sequences: `<s> A </s></s> B </s>`
319
+
320
+ Args:
321
+ token_ids_0 (`List[int]`):
322
+ List of IDs to which the special tokens will be added.
323
+ token_ids_1 (`List[int]`, *optional*):
324
+ Optional second list of IDs for sequence pairs.
325
+
326
+ Returns:
327
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
328
+ """
329
+
330
+ if token_ids_1 is None:
331
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
332
+ cls = [self.cls_token_id]
333
+ sep = [self.sep_token_id]
334
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
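A toy trace of the two formats described in the docstring above (not part of the added file); the ids are placeholders apart from 0 (`<s>`/cls) and 2 (`</s>`/sep), which match the fairseq alignment used by this tokenizer.

```python
# <s> X </s> for a single sequence, <s> A </s></s> B </s> for a pair.
cls_id, sep_id = 0, 2      # ids of <s> and </s> in the aligned vocab
token_ids_0 = [100, 101]   # placeholder ids for sequence A
token_ids_1 = [200]        # placeholder ids for sequence B

single = [cls_id] + token_ids_0 + [sep_id]
pair = [cls_id] + token_ids_0 + [sep_id, sep_id] + token_ids_1 + [sep_id]
print(single)  # [0, 100, 101, 2]
print(pair)    # [0, 100, 101, 2, 2, 200, 2]
```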
335
+
336
+ def get_special_tokens_mask(
337
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
338
+ ) -> List[int]:
339
+ """
340
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
341
+ special tokens using the tokenizer `prepare_for_model` method.
342
+
343
+ Args:
344
+ token_ids_0 (`List[int]`):
345
+ List of IDs.
346
+ token_ids_1 (`List[int]`, *optional*):
347
+ Optional second list of IDs for sequence pairs.
348
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
349
+ Whether or not the token list is already formatted with special tokens for the model.
350
+
351
+ Returns:
352
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
353
+ """
354
+
355
+ if already_has_special_tokens:
356
+ return super().get_special_tokens_mask(
357
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
358
+ )
359
+
360
+ if token_ids_1 is None:
361
+ return [1] + ([0] * len(token_ids_0)) + [1]
362
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
363
+
364
+ def create_token_type_ids_from_sequences(
365
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
366
+ ) -> List[int]:
367
+ """
368
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
369
+ not make use of token type ids, therefore a list of zeros is returned.
370
+
371
+ Args:
372
+ token_ids_0 (`List[int]`):
373
+ List of IDs.
374
+ token_ids_1 (`List[int]`, *optional*):
375
+ Optional second list of IDs for sequence pairs.
376
+
377
+ Returns:
378
+ `List[int]`: List of zeros.
379
+
380
+ """
381
+
382
+ sep = [self.sep_token_id]
383
+ cls = [self.cls_token_id]
384
+
385
+ if token_ids_1 is None:
386
+ return len(cls + token_ids_0 + sep) * [0]
387
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
388
+
389
+ @property
390
+ def vocab_size(self):
391
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
392
+
393
+ def get_vocab(self):
394
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
395
+ vocab.update(self.added_tokens_encoder)
396
+ return vocab
397
+
398
+ def _tokenize(self, text: str) -> List[str]:
399
+ return self.sp_model.encode(text, out_type=str)
400
+
401
+ def _convert_token_to_id(self, token):
402
+ """Converts a token (str) in an id using the vocab."""
403
+ if token in self.fairseq_tokens_to_ids:
404
+ return self.fairseq_tokens_to_ids[token]
405
+ spm_id = self.sp_model.PieceToId(token)
406
+
407
+ # Need to return unknown token if the SP model returned 0
408
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
409
+
410
+ def _convert_id_to_token(self, index):
411
+ """Converts an index (integer) in a token (str) using the vocab."""
412
+ if index in self.fairseq_ids_to_tokens:
413
+ return self.fairseq_ids_to_tokens[index]
414
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
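The two conversion methods above implement the fairseq/SentencePiece alignment sketched in `__init__`. A self-contained toy version (not part of the added file; the SentencePiece ids below are made up for illustration):

```python
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # the first real spm piece (spm id 3) becomes id 4 in the model vocab

def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)  # SentencePiece returns 0 for unknown pieces
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

toy_spm_vocab = {",": 3, ".": 4, "▁": 5}    # made-up piece -> spm id mapping
print(token_to_id("<pad>", toy_spm_vocab))  # 1 (special token, fairseq id)
print(token_to_id(",", toy_spm_vocab))      # 4 (spm id 3 + offset 1)
print(token_to_id("oov", toy_spm_vocab))    # 3 (falls back to <unk>)
```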
415
+
416
+ def convert_tokens_to_string(self, tokens):
417
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
418
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
419
+ return out_string
420
+
421
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
422
+ if not os.path.isdir(save_directory):
423
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
424
+ return
425
+ out_vocab_file = os.path.join(
426
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
427
+ )
428
+
429
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
430
+ copyfile(self.vocab_file, out_vocab_file)
431
+ elif not os.path.isfile(self.vocab_file):
432
+ with open(out_vocab_file, "wb") as fi:
433
+ content_spiece_model = self.sp_model.serialized_model_proto()
434
+ fi.write(content_spiece_model)
435
+
436
+ return (out_vocab_file,)
437
+
438
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
439
+ def __call__(
440
+ self,
441
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
442
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
443
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
444
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
445
+ add_special_tokens: bool = True,
446
+ padding: Union[bool, str, PaddingStrategy] = False,
447
+ truncation: Union[bool, str, TruncationStrategy] = None,
448
+ max_length: Optional[int] = None,
449
+ stride: int = 0,
450
+ pad_to_multiple_of: Optional[int] = None,
451
+ return_tensors: Optional[Union[str, TensorType]] = None,
452
+ return_token_type_ids: Optional[bool] = None,
453
+ return_attention_mask: Optional[bool] = None,
454
+ return_overflowing_tokens: bool = False,
455
+ return_special_tokens_mask: bool = False,
456
+ return_offsets_mapping: bool = False,
457
+ return_length: bool = False,
458
+ verbose: bool = True,
459
+ **kwargs,
460
+ ) -> BatchEncoding:
461
+ """
462
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
463
+ sequences with word-level normalized bounding boxes and optional labels.
464
+
465
+ Args:
466
+ text (`str`, `List[str]`, `List[List[str]]`):
467
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
468
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
469
+ words).
470
+ text_pair (`List[str]`, `List[List[str]]`):
471
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
472
+ (pretokenized string).
473
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
474
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
475
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
476
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
477
+ """
478
+
479
+ # Input type checking for clearer error
480
+ def _is_valid_text_input(t):
481
+ if isinstance(t, str):
482
+ # Strings are fine
483
+ return True
484
+ elif isinstance(t, (list, tuple)):
485
+ # List are fine as long as they are...
486
+ if len(t) == 0:
487
+ # ... empty
488
+ return True
489
+ elif isinstance(t[0], str):
490
+ # ... list of strings
491
+ return True
492
+ elif isinstance(t[0], (list, tuple)):
493
+ # ... list with an empty list or with a list of strings
494
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
495
+ else:
496
+ return False
497
+ else:
498
+ return False
499
+
500
+ if text_pair is not None:
501
+ # in case text + text_pair are provided, text = questions, text_pair = words
502
+ if not _is_valid_text_input(text):
503
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
504
+ if not isinstance(text_pair, (list, tuple)):
505
+ raise ValueError(
506
+ "words must of type `List[str]` (single pretokenized example), "
507
+ "or `List[List[str]]` (batch of pretokenized examples)."
508
+ )
509
+ else:
510
+ # in case only text is provided => must be words
511
+ if not isinstance(text, (list, tuple)):
512
+ raise ValueError(
513
+ "Words must of type `List[str]` (single pretokenized example), "
514
+ "or `List[List[str]]` (batch of pretokenized examples)."
515
+ )
516
+
517
+ if text_pair is not None:
518
+ is_batched = isinstance(text, (list, tuple))
519
+ else:
520
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
521
+
522
+ words = text if text_pair is None else text_pair
523
+ if boxes is None:
524
+ raise ValueError("You must provide corresponding bounding boxes")
525
+ if is_batched:
526
+ if len(words) != len(boxes):
527
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
528
+ for words_example, boxes_example in zip(words, boxes):
529
+ if len(words_example) != len(boxes_example):
530
+ raise ValueError("You must provide as many words as there are bounding boxes")
531
+ else:
532
+ if len(words) != len(boxes):
533
+ raise ValueError("You must provide as many words as there are bounding boxes")
534
+
535
+ if is_batched:
536
+ if text_pair is not None and len(text) != len(text_pair):
537
+ raise ValueError(
538
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
539
+ f" {len(text_pair)}."
540
+ )
541
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
542
+ is_pair = bool(text_pair is not None)
543
+ return self.batch_encode_plus(
544
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
545
+ is_pair=is_pair,
546
+ boxes=boxes,
547
+ word_labels=word_labels,
548
+ add_special_tokens=add_special_tokens,
549
+ padding=padding,
550
+ truncation=truncation,
551
+ max_length=max_length,
552
+ stride=stride,
553
+ pad_to_multiple_of=pad_to_multiple_of,
554
+ return_tensors=return_tensors,
555
+ return_token_type_ids=return_token_type_ids,
556
+ return_attention_mask=return_attention_mask,
557
+ return_overflowing_tokens=return_overflowing_tokens,
558
+ return_special_tokens_mask=return_special_tokens_mask,
559
+ return_offsets_mapping=return_offsets_mapping,
560
+ return_length=return_length,
561
+ verbose=verbose,
562
+ **kwargs,
563
+ )
564
+ else:
565
+ return self.encode_plus(
566
+ text=text,
567
+ text_pair=text_pair,
568
+ boxes=boxes,
569
+ word_labels=word_labels,
570
+ add_special_tokens=add_special_tokens,
571
+ padding=padding,
572
+ truncation=truncation,
573
+ max_length=max_length,
574
+ stride=stride,
575
+ pad_to_multiple_of=pad_to_multiple_of,
576
+ return_tensors=return_tensors,
577
+ return_token_type_ids=return_token_type_ids,
578
+ return_attention_mask=return_attention_mask,
579
+ return_overflowing_tokens=return_overflowing_tokens,
580
+ return_special_tokens_mask=return_special_tokens_mask,
581
+ return_offsets_mapping=return_offsets_mapping,
582
+ return_length=return_length,
583
+ verbose=verbose,
584
+ **kwargs,
585
+ )
586
+
587
+ def _batch_encode_plus(
588
+ self,
589
+ batch_text_or_text_pairs: Union[
590
+ List[TextInput],
591
+ List[TextInputPair],
592
+ List[PreTokenizedInput],
593
+ ],
594
+ is_pair: bool = None,
595
+ boxes: Optional[List[List[List[int]]]] = None,
596
+ word_labels: Optional[List[List[int]]] = None,
597
+ add_special_tokens: bool = True,
598
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
599
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
600
+ max_length: Optional[int] = None,
601
+ stride: int = 0,
602
+ pad_to_multiple_of: Optional[int] = None,
603
+ return_tensors: Optional[Union[str, TensorType]] = None,
604
+ return_token_type_ids: Optional[bool] = None,
605
+ return_attention_mask: Optional[bool] = None,
606
+ return_overflowing_tokens: bool = False,
607
+ return_special_tokens_mask: bool = False,
608
+ return_offsets_mapping: bool = False,
609
+ return_length: bool = False,
610
+ verbose: bool = True,
611
+ **kwargs,
612
+ ) -> BatchEncoding:
613
+ if return_offsets_mapping:
614
+ raise NotImplementedError(
615
+ "return_offset_mapping is not available when using Python tokenizers. "
616
+ "To use this feature, change your tokenizer to one deriving from "
617
+ "transformers.PreTrainedTokenizerFast."
618
+ )
619
+
620
+ batch_outputs = self._batch_prepare_for_model(
621
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
622
+ is_pair=is_pair,
623
+ boxes=boxes,
624
+ word_labels=word_labels,
625
+ add_special_tokens=add_special_tokens,
626
+ padding_strategy=padding_strategy,
627
+ truncation_strategy=truncation_strategy,
628
+ max_length=max_length,
629
+ stride=stride,
630
+ pad_to_multiple_of=pad_to_multiple_of,
631
+ return_attention_mask=return_attention_mask,
632
+ return_token_type_ids=return_token_type_ids,
633
+ return_overflowing_tokens=return_overflowing_tokens,
634
+ return_special_tokens_mask=return_special_tokens_mask,
635
+ return_length=return_length,
636
+ return_tensors=return_tensors,
637
+ verbose=verbose,
638
+ )
639
+
640
+ return BatchEncoding(batch_outputs)
641
+
642
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
643
+ def _batch_prepare_for_model(
644
+ self,
645
+ batch_text_or_text_pairs,
646
+ is_pair: bool = None,
647
+ boxes: Optional[List[List[int]]] = None,
648
+ word_labels: Optional[List[List[int]]] = None,
649
+ add_special_tokens: bool = True,
650
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
651
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
652
+ max_length: Optional[int] = None,
653
+ stride: int = 0,
654
+ pad_to_multiple_of: Optional[int] = None,
655
+ return_tensors: Optional[str] = None,
656
+ return_token_type_ids: Optional[bool] = None,
657
+ return_attention_mask: Optional[bool] = None,
658
+ return_overflowing_tokens: bool = False,
659
+ return_special_tokens_mask: bool = False,
660
+ return_length: bool = False,
661
+ verbose: bool = True,
662
+ ) -> BatchEncoding:
663
+ """
664
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
665
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
666
+ manages a moving window (with user defined stride) for overflowing tokens
667
+
668
+ Args:
669
+ batch_text_or_text_pairs: list of pretokenized words, or of (text, words) pairs
670
+ """
671
+
672
+ batch_outputs = {}
673
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
674
+ batch_text_or_text_pair, boxes_example = example
675
+ outputs = self.prepare_for_model(
676
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
677
+ batch_text_or_text_pair[1] if is_pair else None,
678
+ boxes_example,
679
+ word_labels=word_labels[idx] if word_labels is not None else None,
680
+ add_special_tokens=add_special_tokens,
681
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
682
+ truncation=truncation_strategy.value,
683
+ max_length=max_length,
684
+ stride=stride,
685
+ pad_to_multiple_of=None, # we pad in batch afterward
686
+ return_attention_mask=False, # we pad in batch afterward
687
+ return_token_type_ids=return_token_type_ids,
688
+ return_overflowing_tokens=return_overflowing_tokens,
689
+ return_special_tokens_mask=return_special_tokens_mask,
690
+ return_length=return_length,
691
+ return_tensors=None, # We convert the whole batch to tensors at the end
692
+ prepend_batch_axis=False,
693
+ verbose=verbose,
694
+ )
695
+
696
+ for key, value in outputs.items():
697
+ if key not in batch_outputs:
698
+ batch_outputs[key] = []
699
+ batch_outputs[key].append(value)
700
+
701
+ batch_outputs = self.pad(
702
+ batch_outputs,
703
+ padding=padding_strategy.value,
704
+ max_length=max_length,
705
+ pad_to_multiple_of=pad_to_multiple_of,
706
+ return_attention_mask=return_attention_mask,
707
+ )
708
+
709
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
710
+
711
+ return batch_outputs
712
+
713
+ def _encode_plus(
714
+ self,
715
+ text: Union[TextInput, PreTokenizedInput],
716
+ text_pair: Optional[PreTokenizedInput] = None,
717
+ boxes: Optional[List[List[int]]] = None,
718
+ word_labels: Optional[List[int]] = None,
719
+ add_special_tokens: bool = True,
720
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
721
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
722
+ max_length: Optional[int] = None,
723
+ stride: int = 0,
724
+ pad_to_multiple_of: Optional[int] = None,
725
+ return_tensors: Optional[Union[str, TensorType]] = None,
726
+ return_token_type_ids: Optional[bool] = None,
727
+ return_attention_mask: Optional[bool] = None,
728
+ return_overflowing_tokens: bool = False,
729
+ return_special_tokens_mask: bool = False,
730
+ return_offsets_mapping: bool = False,
731
+ return_length: bool = False,
732
+ verbose: bool = True,
733
+ **kwargs,
734
+ ) -> BatchEncoding:
735
+ if return_offsets_mapping:
736
+ raise NotImplementedError(
737
+ "return_offset_mapping is not available when using Python tokenizers. "
738
+ "To use this feature, change your tokenizer to one deriving from "
739
+ "transformers.PreTrainedTokenizerFast. "
740
+ "More information on available tokenizers at "
741
+ "https://github.com/huggingface/transformers/pull/2674"
742
+ )
743
+
744
+ return self.prepare_for_model(
745
+ text=text,
746
+ text_pair=text_pair,
747
+ boxes=boxes,
748
+ word_labels=word_labels,
749
+ add_special_tokens=add_special_tokens,
750
+ padding=padding_strategy.value,
751
+ truncation=truncation_strategy.value,
752
+ max_length=max_length,
753
+ stride=stride,
754
+ pad_to_multiple_of=pad_to_multiple_of,
755
+ return_tensors=return_tensors,
756
+ prepend_batch_axis=True,
757
+ return_attention_mask=return_attention_mask,
758
+ return_token_type_ids=return_token_type_ids,
759
+ return_overflowing_tokens=return_overflowing_tokens,
760
+ return_special_tokens_mask=return_special_tokens_mask,
761
+ return_length=return_length,
762
+ verbose=verbose,
763
+ )
764
+
765
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
766
+ def prepare_for_model(
767
+ self,
768
+ text: Union[TextInput, PreTokenizedInput],
769
+ text_pair: Optional[PreTokenizedInput] = None,
770
+ boxes: Optional[List[List[int]]] = None,
771
+ word_labels: Optional[List[int]] = None,
772
+ add_special_tokens: bool = True,
773
+ padding: Union[bool, str, PaddingStrategy] = False,
774
+ truncation: Union[bool, str, TruncationStrategy] = None,
775
+ max_length: Optional[int] = None,
776
+ stride: int = 0,
777
+ pad_to_multiple_of: Optional[int] = None,
778
+ return_tensors: Optional[Union[str, TensorType]] = None,
779
+ return_token_type_ids: Optional[bool] = None,
780
+ return_attention_mask: Optional[bool] = None,
781
+ return_overflowing_tokens: bool = False,
782
+ return_special_tokens_mask: bool = False,
783
+ return_offsets_mapping: bool = False,
784
+ return_length: bool = False,
785
+ verbose: bool = True,
786
+ prepend_batch_axis: bool = False,
787
+ **kwargs,
788
+ ) -> BatchEncoding:
789
+ """
790
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
791
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
792
+ (with user defined stride) for overflowing tokens.
793
+
794
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
795
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
796
+ labeled with -100, such that they will be ignored by the loss function.
797
+
798
+ Args:
799
+ text (`str`, `List[str]`, `List[List[str]]`):
800
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
801
+ text_pair (`List[str]` or `List[int]`, *optional*):
802
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
803
+ list of list of strings (words of a batch of examples).
804
+ """
805
+
806
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
807
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
808
+ padding=padding,
809
+ truncation=truncation,
810
+ max_length=max_length,
811
+ pad_to_multiple_of=pad_to_multiple_of,
812
+ verbose=verbose,
813
+ **kwargs,
814
+ )
815
+
816
+ tokens = []
817
+ pair_tokens = []
818
+ token_boxes = []
819
+ pair_token_boxes = []
820
+ labels = []
821
+
822
+ if text_pair is None:
823
+ if word_labels is None:
824
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
825
+ for word, box in zip(text, boxes):
826
+ if len(word) < 1: # skip empty words
827
+ continue
828
+ word_tokens = self.tokenize(word)
829
+ tokens.extend(word_tokens)
830
+ token_boxes.extend([box] * len(word_tokens))
831
+ else:
832
+ # CASE 2: token classification (training)
833
+ for word, box, label in zip(text, boxes, word_labels):
834
+ if len(word) < 1: # skip empty words
835
+ continue
836
+ word_tokens = self.tokenize(word)
837
+ tokens.extend(word_tokens)
838
+ token_boxes.extend([box] * len(word_tokens))
839
+ if self.only_label_first_subword:
840
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
841
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
842
+ else:
843
+ labels.extend([label] * len(word_tokens))
844
+ else:
845
+ # CASE 3: document visual question answering (inference)
846
+ # text = question
847
+ # text_pair = words
848
+ tokens = self.tokenize(text)
849
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]
850
+
851
+ for word, box in zip(text_pair, boxes):
852
+ if len(word) < 1: # skip empty words
853
+ continue
854
+ word_tokens = self.tokenize(word)
855
+ pair_tokens.extend(word_tokens)
856
+ pair_token_boxes.extend([box] * len(word_tokens))
857
+
858
+ # Create ids + pair_ids
859
+ ids = self.convert_tokens_to_ids(tokens)
860
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
861
+
862
+ # Compute the total size of the returned encodings
863
+ pair = bool(pair_ids is not None)
864
+ len_ids = len(ids)
865
+ len_pair_ids = len(pair_ids) if pair else 0
866
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
867
+
868
+ # Truncation: Handle max sequence length
869
+ overflowing_tokens = []
870
+ overflowing_token_boxes = []
871
+ overflowing_labels = []
872
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
873
+ (
874
+ ids,
875
+ token_boxes,
876
+ pair_ids,
877
+ pair_token_boxes,
878
+ labels,
879
+ overflowing_tokens,
880
+ overflowing_token_boxes,
881
+ overflowing_labels,
882
+ ) = self.truncate_sequences(
883
+ ids,
884
+ token_boxes,
885
+ pair_ids=pair_ids,
886
+ pair_token_boxes=pair_token_boxes,
887
+ labels=labels,
888
+ num_tokens_to_remove=total_len - max_length,
889
+ truncation_strategy=truncation_strategy,
890
+ stride=stride,
891
+ )
892
+
893
+ if return_token_type_ids and not add_special_tokens:
894
+ raise ValueError(
895
+ "Asking to return token_type_ids while setting add_special_tokens to False "
896
+ "results in an undefined behavior. Please set add_special_tokens to True or "
897
+ "set return_token_type_ids to None."
898
+ )
899
+
900
+ # Load from model defaults
901
+ if return_token_type_ids is None:
902
+ return_token_type_ids = "token_type_ids" in self.model_input_names
903
+ if return_attention_mask is None:
904
+ return_attention_mask = "attention_mask" in self.model_input_names
905
+
906
+ encoded_inputs = {}
907
+
908
+ if return_overflowing_tokens:
909
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
910
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
911
+ encoded_inputs["overflowing_labels"] = overflowing_labels
912
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
913
+
914
+ # Add special tokens
915
+ if add_special_tokens:
916
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
917
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
918
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
919
+ if pair_token_boxes:
920
+ pair_token_boxes = pair_token_boxes + [self.sep_token_box]
921
+ if labels:
922
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
923
+ else:
924
+ sequence = ids + pair_ids if pair else ids
925
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
926
+
927
+ # Build output dictionary
928
+ encoded_inputs["input_ids"] = sequence
929
+ encoded_inputs["bbox"] = token_boxes + pair_token_boxes
930
+ if return_token_type_ids:
931
+ encoded_inputs["token_type_ids"] = token_type_ids
932
+ if return_special_tokens_mask:
933
+ if add_special_tokens:
934
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
935
+ else:
936
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
937
+
938
+ if labels:
939
+ encoded_inputs["labels"] = labels
940
+
941
+ # Check lengths
942
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
943
+
944
+ # Padding
945
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
946
+ encoded_inputs = self.pad(
947
+ encoded_inputs,
948
+ max_length=max_length,
949
+ padding=padding_strategy.value,
950
+ pad_to_multiple_of=pad_to_multiple_of,
951
+ return_attention_mask=return_attention_mask,
952
+ )
953
+
954
+ if return_length:
955
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
956
+
957
+ batch_outputs = BatchEncoding(
958
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
959
+ )
960
+
961
+ return batch_outputs
962
+
963
+ def truncate_sequences(
964
+ self,
965
+ ids: List[int],
966
+ token_boxes: List[List[int]],
967
+ pair_ids: Optional[List[int]] = None,
968
+ pair_token_boxes: Optional[List[List[int]]] = None,
969
+ labels: Optional[List[int]] = None,
970
+ num_tokens_to_remove: int = 0,
971
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
972
+ stride: int = 0,
973
+ ) -> Tuple[List[int], List[int], List[int]]:
974
+ """
975
+ Truncates a sequence pair in-place following the strategy.
976
+
977
+ Args:
978
+ ids (`List[int]`):
979
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
980
+ `convert_tokens_to_ids` methods.
981
+ token_boxes (`List[List[int]]`):
982
+ Bounding boxes of the first sequence.
983
+ pair_ids (`List[int]`, *optional*):
984
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
985
+ and `convert_tokens_to_ids` methods.
986
+ pair_token_boxes (`List[List[int]]`, *optional*):
987
+ Bounding boxes of the second sequence.
988
+ labels (`List[int]`, *optional*):
989
+ Labels of the first sequence (for token classification tasks).
990
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
991
+ Number of tokens to remove using the truncation strategy.
992
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
993
+ The strategy to follow for truncation. Can be:
994
+
995
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
996
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
997
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
998
+ batch of pairs) is provided.
999
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1000
+ maximum acceptable input length for the model if that argument is not provided. This will only
1001
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1002
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1003
+ maximum acceptable input length for the model if that argument is not provided. This will only
1004
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1005
+ - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
1006
+ than the model maximum admissible input size).
1007
+ stride (`int`, *optional*, defaults to 0):
1008
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1009
+ sequence returned. The value of this argument defines the number of additional tokens.
1010
+
1011
+ Returns:
1012
+ `Tuple`: The truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, followed by the
1013
+ lists of overflowing tokens, overflowing token boxes and overflowing labels.
1014
+ """
1015
+ if num_tokens_to_remove <= 0:
1016
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
1017
+
1018
+ if not isinstance(truncation_strategy, TruncationStrategy):
1019
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1020
+
1021
+ overflowing_tokens = []
1022
+ overflowing_token_boxes = []
1023
+ overflowing_labels = []
1024
+ if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1025
+ for _ in range(num_tokens_to_remove):
1026
+ if pair_ids is None or len(ids) > len(pair_ids):
1027
+ if not overflowing_tokens:
1028
+ window_len = min(len(ids), stride + 1)
1029
+ else:
1030
+ window_len = 1
1031
+ overflowing_tokens.extend(ids[-window_len:])
1032
+ overflowing_token_boxes.extend(token_boxes[-window_len:])
1033
+ overflowing_labels.extend(labels[-window_len:])
1034
+ ids = ids[:-1]
1035
+ token_boxes = token_boxes[:-1]
1036
+ labels = labels[:-1]
1037
+ else:
1038
+ if not overflowing_tokens:
1039
+ window_len = min(len(pair_ids), stride + 1)
1040
+ else:
1041
+ window_len = 1
1042
+ overflowing_tokens.extend(pair_ids[-window_len:])
1043
+ overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
1044
+ pair_ids = pair_ids[:-1]
1045
+ pair_token_boxes = pair_token_boxes[:-1]
1046
+ elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
1047
+ if len(ids) > num_tokens_to_remove:
1048
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1049
+ overflowing_tokens = ids[-window_len:]
1050
+ overflowing_token_boxes = token_boxes[-window_len:]
1051
+ overflowing_labels = labels[-window_len:]
1052
+ ids = ids[:-num_tokens_to_remove]
1053
+ token_boxes = token_boxes[:-num_tokens_to_remove]
1054
+ labels = labels[:-num_tokens_to_remove]
1055
+ else:
1056
+ logger.error(
1057
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1058
+ f"but the first sequence has a length {len(ids)}. "
1059
+ f"Please select another truncation strategy than {truncation_strategy}, "
1060
+ "for instance 'longest_first' or 'only_second'."
1061
+ )
1062
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1063
+ if len(pair_ids) > num_tokens_to_remove:
1064
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1065
+ overflowing_tokens = pair_ids[-window_len:]
1066
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
1067
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1068
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
1069
+ else:
1070
+ logger.error(
1071
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1072
+ f"but the second sequence has a length {len(pair_ids)}. "
1073
+ f"Please select another truncation strategy than {truncation_strategy}, "
1074
+ "for instance 'longest_first' or 'only_first'."
1075
+ )
1076
+
1077
+ return (
1078
+ ids,
1079
+ token_boxes,
1080
+ pair_ids,
1081
+ pair_token_boxes,
1082
+ labels,
1083
+ overflowing_tokens,
1084
+ overflowing_token_boxes,
1085
+ overflowing_labels,
1086
+ )
1087
+
1088
+ def _pad(
1089
+ self,
1090
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1091
+ max_length: Optional[int] = None,
1092
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1093
+ pad_to_multiple_of: Optional[int] = None,
1094
+ return_attention_mask: Optional[bool] = None,
1095
+ ) -> dict:
1096
+ """
1097
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1098
+
1099
+ Args:
1100
+ encoded_inputs:
1101
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1102
+ max_length: maximum length of the returned list and optionally padding length (see below).
1103
+ Will truncate by taking into account the special tokens.
1104
+ padding_strategy: PaddingStrategy to use for padding.
1105
+
1106
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
1107
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1108
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1109
+ The tokenizer padding sides are defined in self.padding_side:
1110
+
1111
+ - 'left': pads on the left of the sequences
1112
+ - 'right': pads on the right of the sequences
1113
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
1114
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1115
+ `>= 7.5` (Volta).
1116
+ return_attention_mask:
1117
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1118
+ """
1119
+ # Load from model defaults
1120
+ if return_attention_mask is None:
1121
+ return_attention_mask = "attention_mask" in self.model_input_names
1122
+
1123
+ required_input = encoded_inputs[self.model_input_names[0]]
1124
+
1125
+ if padding_strategy == PaddingStrategy.LONGEST:
1126
+ max_length = len(required_input)
1127
+
1128
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1129
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1130
+
1131
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1132
+
1133
+ # Initialize attention mask if not present.
1134
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1135
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1136
+
1137
+ if needs_to_be_padded:
1138
+ difference = max_length - len(required_input)
1139
+ if self.padding_side == "right":
1140
+ if return_attention_mask:
1141
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1142
+ if "token_type_ids" in encoded_inputs:
1143
+ encoded_inputs["token_type_ids"] = (
1144
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1145
+ )
1146
+ if "bbox" in encoded_inputs:
1147
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
1148
+ if "labels" in encoded_inputs:
1149
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1150
+ if "special_tokens_mask" in encoded_inputs:
1151
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1152
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1153
+ elif self.padding_side == "left":
1154
+ if return_attention_mask:
1155
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1156
+ if "token_type_ids" in encoded_inputs:
1157
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1158
+ "token_type_ids"
1159
+ ]
1160
+ if "bbox" in encoded_inputs:
1161
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
1162
+ if "labels" in encoded_inputs:
1163
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1164
+ if "special_tokens_mask" in encoded_inputs:
1165
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1166
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1167
+ else:
1168
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1169
+
1170
+ return encoded_inputs
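
As a quick illustration of the right-padding bookkeeping documented in `_pad` above, the following standalone sketch reproduces the same steps on plain Python lists. It does not call the library itself; the `pad_token_id`, `pad_token_box` and `pad_token_label` values below are assumptions chosen for the example:

def pad_right(encoded, max_length, pad_to_multiple_of=None,
              pad_token_id=1, pad_token_box=(0, 0, 0, 0), pad_token_label=-100):
    """Right-pad input_ids, bbox and labels, and build the attention mask."""
    if pad_to_multiple_of is not None and max_length % pad_to_multiple_of != 0:
        # round max_length up to the next multiple, exactly as _pad does
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    difference = max_length - len(encoded["input_ids"])
    encoded["attention_mask"] = [1] * len(encoded["input_ids"]) + [0] * difference
    encoded["input_ids"] = encoded["input_ids"] + [pad_token_id] * difference
    encoded["bbox"] = encoded["bbox"] + [list(pad_token_box)] * difference
    encoded["labels"] = encoded["labels"] + [pad_token_label] * difference
    return encoded

example = {
    "input_ids": [0, 9374, 1180, 2],
    "bbox": [[0, 0, 0, 0], [48, 84, 156, 98], [160, 84, 244, 98], [1000, 1000, 1000, 1000]],
    "labels": [-100, 3, 5, -100],
}
padded = pad_right(example, max_length=6, pad_to_multiple_of=8)
print(len(padded["input_ids"]))  # 8 -- max_length=6 was rounded up to a multiple of 8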
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py ADDED
@@ -0,0 +1,800 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for LayoutXLM model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_base import (
24
+ BatchEncoding,
25
+ EncodedInput,
26
+ PreTokenizedInput,
27
+ TextInput,
28
+ TextInputPair,
29
+ TruncationStrategy,
30
+ )
31
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
32
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging
33
+ from ..xlm_roberta.tokenization_xlm_roberta_fast import (
34
+ VOCAB_FILES_NAMES,
35
+ )
36
+
37
+
38
+ if is_sentencepiece_available():
39
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
40
+ else:
41
+ LayoutXLMTokenizer = None
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
47
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
48
+ Whether or not to encode the sequences with the special tokens relative to their model.
49
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
50
+ Activates and controls padding. Accepts the following values:
51
+
52
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
53
+ sequence is provided).
54
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
55
+ acceptable input length for the model if that argument is not provided.
56
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
57
+ lengths).
58
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
59
+ Activates and controls truncation. Accepts the following values:
60
+
61
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
62
+ to the maximum acceptable input length for the model if that argument is not provided. This will
63
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
64
+ sequences (or a batch of pairs) is provided.
65
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
66
+ maximum acceptable input length for the model if that argument is not provided. This will only
67
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
68
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
69
+ maximum acceptable input length for the model if that argument is not provided. This will only
70
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
71
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
72
+ greater than the model maximum admissible input size).
73
+ max_length (`int`, *optional*):
74
+ Controls the maximum length to use by one of the truncation/padding parameters.
75
+
76
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
77
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
78
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
79
+ stride (`int`, *optional*, defaults to 0):
80
+ If set to a number along with `max_length`, the overflowing tokens returned when
81
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
82
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
83
+ argument defines the number of overlapping tokens.
84
+ pad_to_multiple_of (`int`, *optional*):
85
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
86
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
87
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
88
+ If set, will return tensors instead of list of python integers. Acceptable values are:
89
+
90
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
91
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
92
+ - `'np'`: Return Numpy `np.ndarray` objects.
93
+ return_token_type_ids (`bool`, *optional*):
94
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
95
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
96
+
97
+ [What are token type IDs?](../glossary#token-type-ids)
98
+ return_attention_mask (`bool`, *optional*):
99
+ Whether to return the attention mask. If left to the default, will return the attention mask according
100
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
101
+
102
+ [What are attention masks?](../glossary#attention-mask)
103
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
104
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
105
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
106
+ of returning overflowing tokens.
107
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
108
+ Whether or not to return special tokens mask information.
109
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to return `(char_start, char_end)` for each token.
111
+
112
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
113
+ Python's tokenizer, this method will raise `NotImplementedError`.
114
+ return_length (`bool`, *optional*, defaults to `False`):
115
+ Whether or not to return the lengths of the encoded inputs.
116
+ verbose (`bool`, *optional*, defaults to `True`):
117
+ Whether or not to print more information and warnings.
118
+ **kwargs: passed to the `self.tokenize()` method
119
+
120
+ Return:
121
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
122
+
123
+ - **input_ids** -- List of token ids to be fed to a model.
124
+
125
+ [What are input IDs?](../glossary#input-ids)
126
+
127
+ - **bbox** -- List of bounding boxes to be fed to a model.
128
+
129
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
130
+ if *"token_type_ids"* is in `self.model_input_names`).
131
+
132
+ [What are token type IDs?](../glossary#token-type-ids)
133
+
134
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
135
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
136
+
137
+ [What are attention masks?](../glossary#attention-mask)
138
+
139
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
140
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
141
+ `return_overflowing_tokens=True`).
142
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
143
+ `return_overflowing_tokens=True`).
144
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
145
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
146
+ - **length** -- The length of the inputs (when `return_length=True`).
147
+ """
148
+
149
+
150
+ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
151
+ """
152
+ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
153
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
154
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
155
+
156
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
157
+ refer to this superclass for more information regarding those methods.
158
+
159
+ Args:
160
+ vocab_file (`str`):
161
+ Path to the vocabulary file.
162
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
163
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
164
+
165
+ <Tip>
166
+
167
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
168
+ sequence. The token used is the `cls_token`.
169
+
170
+ </Tip>
171
+
172
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
173
+ The end of sequence token.
174
+
175
+ <Tip>
176
+
177
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
178
+ The token used is the `sep_token`.
179
+
180
+ </Tip>
181
+
182
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
183
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
184
+ sequence classification or for a text and a question for question answering. It is also used as the last
185
+ token of a sequence built with special tokens.
186
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
187
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
188
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
189
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
190
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
191
+ token instead.
192
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
193
+ The token used for padding, for example when batching sequences of different lengths.
194
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
195
+ The token used for masking values. This is the token used when training this model with masked language
196
+ modeling. This is the token which the model will try to predict.
197
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
198
+ The bounding box to use for the special [CLS] token.
199
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
200
+ The bounding box to use for the special [SEP] token.
201
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
202
+ The bounding box to use for the special [PAD] token.
203
+ pad_token_label (`int`, *optional*, defaults to -100):
204
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
205
+ CrossEntropyLoss.
206
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
207
+ Whether or not to only label the first subword, in case word labels are provided.
208
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
209
+ Additional special tokens used by the tokenizer.
210
+ """
211
+
212
+ vocab_files_names = VOCAB_FILES_NAMES
213
+ model_input_names = ["input_ids", "attention_mask"]
214
+ slow_tokenizer_class = LayoutXLMTokenizer
215
+
216
+ def __init__(
217
+ self,
218
+ vocab_file=None,
219
+ tokenizer_file=None,
220
+ bos_token="<s>",
221
+ eos_token="</s>",
222
+ sep_token="</s>",
223
+ cls_token="<s>",
224
+ unk_token="<unk>",
225
+ pad_token="<pad>",
226
+ mask_token="<mask>",
227
+ cls_token_box=[0, 0, 0, 0],
228
+ sep_token_box=[1000, 1000, 1000, 1000],
229
+ pad_token_box=[0, 0, 0, 0],
230
+ pad_token_label=-100,
231
+ only_label_first_subword=True,
232
+ **kwargs,
233
+ ):
234
+ # Mask token behaves like a normal word, i.e. includes the space before it
235
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
236
+
237
+ super().__init__(
238
+ vocab_file,
239
+ tokenizer_file=tokenizer_file,
240
+ bos_token=bos_token,
241
+ eos_token=eos_token,
242
+ sep_token=sep_token,
243
+ cls_token=cls_token,
244
+ unk_token=unk_token,
245
+ pad_token=pad_token,
246
+ mask_token=mask_token,
247
+ cls_token_box=cls_token_box,
248
+ sep_token_box=sep_token_box,
249
+ pad_token_box=pad_token_box,
250
+ pad_token_label=pad_token_label,
251
+ only_label_first_subword=only_label_first_subword,
252
+ **kwargs,
253
+ )
254
+
255
+ self.vocab_file = vocab_file
256
+
257
+ # additional properties
258
+ self.cls_token_box = cls_token_box
259
+ self.sep_token_box = sep_token_box
260
+ self.pad_token_box = pad_token_box
261
+ self.pad_token_label = pad_token_label
262
+ self.only_label_first_subword = only_label_first_subword
263
+
264
+ @property
265
+ def can_save_slow_tokenizer(self) -> bool:
266
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
267
+
268
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
269
+ def __call__(
270
+ self,
271
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
272
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
273
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
274
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
275
+ add_special_tokens: bool = True,
276
+ padding: Union[bool, str, PaddingStrategy] = False,
277
+ truncation: Union[bool, str, TruncationStrategy] = None,
278
+ max_length: Optional[int] = None,
279
+ stride: int = 0,
280
+ pad_to_multiple_of: Optional[int] = None,
281
+ return_tensors: Optional[Union[str, TensorType]] = None,
282
+ return_token_type_ids: Optional[bool] = None,
283
+ return_attention_mask: Optional[bool] = None,
284
+ return_overflowing_tokens: bool = False,
285
+ return_special_tokens_mask: bool = False,
286
+ return_offsets_mapping: bool = False,
287
+ return_length: bool = False,
288
+ verbose: bool = True,
289
+ **kwargs,
290
+ ) -> BatchEncoding:
291
+ """
292
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
293
+ sequences with word-level normalized bounding boxes and optional labels.
294
+
295
+ Args:
296
+ text (`str`, `List[str]`, `List[List[str]]`):
297
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
298
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
299
+ words).
300
+ text_pair (`List[str]`, `List[List[str]]`):
301
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
302
+ (pretokenized string).
303
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
304
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
305
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
306
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
307
+ """
308
+
309
+ # Input type checking for clearer error
310
+ def _is_valid_text_input(t):
311
+ if isinstance(t, str):
312
+ # Strings are fine
313
+ return True
314
+ elif isinstance(t, (list, tuple)):
315
+ # List are fine as long as they are...
316
+ if len(t) == 0:
317
+ # ... empty
318
+ return True
319
+ elif isinstance(t[0], str):
320
+ # ... list of strings
321
+ return True
322
+ elif isinstance(t[0], (list, tuple)):
323
+ # ... list with an empty list or with a list of strings
324
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
325
+ else:
326
+ return False
327
+ else:
328
+ return False
329
+
330
+ if text_pair is not None:
331
+ # in case text + text_pair are provided, text = questions, text_pair = words
332
+ if not _is_valid_text_input(text):
333
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
334
+ if not isinstance(text_pair, (list, tuple)):
335
+ raise ValueError(
336
+ "words must of type `List[str]` (single pretokenized example), "
337
+ "or `List[List[str]]` (batch of pretokenized examples)."
338
+ )
339
+ else:
340
+ # in case only text is provided => must be words
341
+ if not isinstance(text, (list, tuple)):
342
+ raise ValueError(
343
+ "Words must of type `List[str]` (single pretokenized example), "
344
+ "or `List[List[str]]` (batch of pretokenized examples)."
345
+ )
346
+
347
+ if text_pair is not None:
348
+ is_batched = isinstance(text, (list, tuple))
349
+ else:
350
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
351
+
352
+ words = text if text_pair is None else text_pair
353
+ if boxes is None:
354
+ raise ValueError("You must provide corresponding bounding boxes")
355
+ if is_batched:
356
+ if len(words) != len(boxes):
357
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
358
+ for words_example, boxes_example in zip(words, boxes):
359
+ if len(words_example) != len(boxes_example):
360
+ raise ValueError("You must provide as many words as there are bounding boxes")
361
+ else:
362
+ if len(words) != len(boxes):
363
+ raise ValueError("You must provide as many words as there are bounding boxes")
364
+
365
+ if is_batched:
366
+ if text_pair is not None and len(text) != len(text_pair):
367
+ raise ValueError(
368
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
369
+ f" {len(text_pair)}."
370
+ )
371
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
372
+ is_pair = bool(text_pair is not None)
373
+ return self.batch_encode_plus(
374
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
375
+ is_pair=is_pair,
376
+ boxes=boxes,
377
+ word_labels=word_labels,
378
+ add_special_tokens=add_special_tokens,
379
+ padding=padding,
380
+ truncation=truncation,
381
+ max_length=max_length,
382
+ stride=stride,
383
+ pad_to_multiple_of=pad_to_multiple_of,
384
+ return_tensors=return_tensors,
385
+ return_token_type_ids=return_token_type_ids,
386
+ return_attention_mask=return_attention_mask,
387
+ return_overflowing_tokens=return_overflowing_tokens,
388
+ return_special_tokens_mask=return_special_tokens_mask,
389
+ return_offsets_mapping=return_offsets_mapping,
390
+ return_length=return_length,
391
+ verbose=verbose,
392
+ **kwargs,
393
+ )
394
+ else:
395
+ return self.encode_plus(
396
+ text=text,
397
+ text_pair=text_pair,
398
+ boxes=boxes,
399
+ word_labels=word_labels,
400
+ add_special_tokens=add_special_tokens,
401
+ padding=padding,
402
+ truncation=truncation,
403
+ max_length=max_length,
404
+ stride=stride,
405
+ pad_to_multiple_of=pad_to_multiple_of,
406
+ return_tensors=return_tensors,
407
+ return_token_type_ids=return_token_type_ids,
408
+ return_attention_mask=return_attention_mask,
409
+ return_overflowing_tokens=return_overflowing_tokens,
410
+ return_special_tokens_mask=return_special_tokens_mask,
411
+ return_offsets_mapping=return_offsets_mapping,
412
+ return_length=return_length,
413
+ verbose=verbose,
414
+ **kwargs,
415
+ )
416
+
417
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
418
+ batched_input = [(text, pair)] if pair else [text]
419
+ encodings = self._tokenizer.encode_batch(
420
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
421
+ )
422
+
423
+ return encodings[0].tokens
424
+
425
+ def _batch_encode_plus(
426
+ self,
427
+ batch_text_or_text_pairs: Union[
428
+ List[TextInput],
429
+ List[TextInputPair],
430
+ List[PreTokenizedInput],
431
+ ],
432
+ is_pair: bool = None,
433
+ boxes: Optional[List[List[List[int]]]] = None,
434
+ word_labels: Optional[List[List[int]]] = None,
435
+ add_special_tokens: bool = True,
436
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
437
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
438
+ max_length: Optional[int] = None,
439
+ stride: int = 0,
440
+ pad_to_multiple_of: Optional[int] = None,
441
+ return_tensors: Optional[str] = None,
442
+ return_token_type_ids: Optional[bool] = None,
443
+ return_attention_mask: Optional[bool] = None,
444
+ return_overflowing_tokens: bool = False,
445
+ return_special_tokens_mask: bool = False,
446
+ return_offsets_mapping: bool = False,
447
+ return_length: bool = False,
448
+ verbose: bool = True,
449
+ **kwargs,
450
+ ) -> BatchEncoding:
451
+ if not isinstance(batch_text_or_text_pairs, list):
452
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
453
+
454
+ # Set the truncation and padding strategy and restore the initial configuration
455
+ self.set_truncation_and_padding(
456
+ padding_strategy=padding_strategy,
457
+ truncation_strategy=truncation_strategy,
458
+ max_length=max_length,
459
+ stride=stride,
460
+ pad_to_multiple_of=pad_to_multiple_of,
461
+ )
462
+
463
+ if is_pair:
464
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
465
+
466
+ encodings = self._tokenizer.encode_batch(
467
+ batch_text_or_text_pairs,
468
+ add_special_tokens=add_special_tokens,
469
+ is_pretokenized=True, # we set this to True as LayoutXLM always expects pretokenized inputs
470
+ )
471
+
472
+ # Convert encoding to dict
473
+ # `Tokens` has type: Tuple[
474
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
475
+ # List[EncodingFast]
476
+ # ]
477
+ # with nested dimensions corresponding to batch, overflows, sequence length
478
+ tokens_and_encodings = [
479
+ self._convert_encoding(
480
+ encoding=encoding,
481
+ return_token_type_ids=return_token_type_ids,
482
+ return_attention_mask=return_attention_mask,
483
+ return_overflowing_tokens=return_overflowing_tokens,
484
+ return_special_tokens_mask=return_special_tokens_mask,
485
+ return_offsets_mapping=True
486
+ if word_labels is not None
487
+ else return_offsets_mapping, # we use offsets to create the labels
488
+ return_length=return_length,
489
+ verbose=verbose,
490
+ )
491
+ for encoding in encodings
492
+ ]
493
+
494
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
495
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
496
+ # (we say ~ because the number of overflow varies with the example in the batch)
497
+ #
498
+ # To match each overflowing sample with the original sample in the batch
499
+ # we add an overflow_to_sample_mapping array (see below)
500
+ sanitized_tokens = {}
501
+ for key in tokens_and_encodings[0][0].keys():
502
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
503
+ sanitized_tokens[key] = stack
504
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
505
+
506
+ # If returning overflowing tokens, we need to return a mapping
507
+ # from the batch idx to the original sample
508
+ if return_overflowing_tokens:
509
+ overflow_to_sample_mapping = []
510
+ for i, (toks, _) in enumerate(tokens_and_encodings):
511
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
512
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
513
+
514
+ for input_ids in sanitized_tokens["input_ids"]:
515
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
516
+
517
+ # create the token boxes
518
+ token_boxes = []
519
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
520
+ if return_overflowing_tokens:
521
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
522
+ else:
523
+ original_index = batch_index
524
+ token_boxes_example = []
525
+ for id, sequence_id, word_id in zip(
526
+ sanitized_tokens["input_ids"][batch_index],
527
+ sanitized_encodings[batch_index].sequence_ids,
528
+ sanitized_encodings[batch_index].word_ids,
529
+ ):
530
+ if word_id is not None:
531
+ if is_pair and sequence_id == 0:
532
+ token_boxes_example.append(self.pad_token_box)
533
+ else:
534
+ token_boxes_example.append(boxes[original_index][word_id])
535
+ else:
536
+ if id == self.cls_token_id:
537
+ token_boxes_example.append(self.cls_token_box)
538
+ elif id == self.sep_token_id:
539
+ token_boxes_example.append(self.sep_token_box)
540
+ elif id == self.pad_token_id:
541
+ token_boxes_example.append(self.pad_token_box)
542
+ else:
543
+ raise ValueError("Id not recognized")
544
+ token_boxes.append(token_boxes_example)
545
+
546
+ sanitized_tokens["bbox"] = token_boxes
547
+
548
+ # optionally, create the labels
549
+ if word_labels is not None:
550
+ labels = []
551
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
552
+ if return_overflowing_tokens:
553
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
554
+ else:
555
+ original_index = batch_index
556
+ labels_example = []
557
+ for id, offset, word_id in zip(
558
+ sanitized_tokens["input_ids"][batch_index],
559
+ sanitized_tokens["offset_mapping"][batch_index],
560
+ sanitized_encodings[batch_index].word_ids,
561
+ ):
562
+ if word_id is not None:
563
+ if self.only_label_first_subword:
564
+ if offset[0] == 0:
565
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
566
+ labels_example.append(word_labels[original_index][word_id])
567
+ else:
568
+ labels_example.append(self.pad_token_label)
569
+ else:
570
+ labels_example.append(word_labels[original_index][word_id])
571
+ else:
572
+ labels_example.append(self.pad_token_label)
573
+ labels.append(labels_example)
574
+
575
+ sanitized_tokens["labels"] = labels
576
+ # finally, remove offsets if the user didn't want them
577
+ if not return_offsets_mapping:
578
+ del sanitized_tokens["offset_mapping"]
579
+
580
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
581
+
582
+ def _encode_plus(
583
+ self,
584
+ text: Union[TextInput, PreTokenizedInput],
585
+ text_pair: Optional[PreTokenizedInput] = None,
586
+ boxes: Optional[List[List[int]]] = None,
587
+ word_labels: Optional[List[int]] = None,
588
+ add_special_tokens: bool = True,
589
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
590
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
591
+ max_length: Optional[int] = None,
592
+ stride: int = 0,
593
+ pad_to_multiple_of: Optional[int] = None,
594
+ return_tensors: Optional[bool] = None,
595
+ return_token_type_ids: Optional[bool] = None,
596
+ return_attention_mask: Optional[bool] = None,
597
+ return_overflowing_tokens: bool = False,
598
+ return_special_tokens_mask: bool = False,
599
+ return_offsets_mapping: bool = False,
600
+ return_length: bool = False,
601
+ verbose: bool = True,
602
+ **kwargs,
603
+ ) -> BatchEncoding:
604
+ # make it a batched input
605
+ # 2 options:
606
+ # 1) only text, in which case text must be a list of str
607
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
608
+ batched_input = [(text, text_pair)] if text_pair else [text]
609
+ batched_boxes = [boxes]
610
+ batched_word_labels = [word_labels] if word_labels is not None else None
611
+ batched_output = self._batch_encode_plus(
612
+ batched_input,
613
+ is_pair=bool(text_pair is not None),
614
+ boxes=batched_boxes,
615
+ word_labels=batched_word_labels,
616
+ add_special_tokens=add_special_tokens,
617
+ padding_strategy=padding_strategy,
618
+ truncation_strategy=truncation_strategy,
619
+ max_length=max_length,
620
+ stride=stride,
621
+ pad_to_multiple_of=pad_to_multiple_of,
622
+ return_tensors=return_tensors,
623
+ return_token_type_ids=return_token_type_ids,
624
+ return_attention_mask=return_attention_mask,
625
+ return_overflowing_tokens=return_overflowing_tokens,
626
+ return_special_tokens_mask=return_special_tokens_mask,
627
+ return_offsets_mapping=return_offsets_mapping,
628
+ return_length=return_length,
629
+ verbose=verbose,
630
+ **kwargs,
631
+ )
632
+
633
+ # If return_tensors is None, we can remove the leading batch axis
634
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
635
+ if return_tensors is None and not return_overflowing_tokens:
636
+ batched_output = BatchEncoding(
637
+ {
638
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
639
+ for key, value in batched_output.items()
640
+ },
641
+ batched_output.encodings,
642
+ )
643
+
644
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
645
+
646
+ return batched_output
647
+
648
+ def _pad(
649
+ self,
650
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
651
+ max_length: Optional[int] = None,
652
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
653
+ pad_to_multiple_of: Optional[int] = None,
654
+ return_attention_mask: Optional[bool] = None,
655
+ ) -> dict:
656
+ """
657
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
658
+
659
+ Args:
660
+ encoded_inputs:
661
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
662
+ max_length: maximum length of the returned list and optionally padding length (see below).
663
+ Will truncate by taking into account the special tokens.
664
+ padding_strategy: PaddingStrategy to use for padding.
665
+
666
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
667
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
668
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
669
+ The tokenizer padding sides are defined in self.padding_side:
670
+
671
+ - 'left': pads on the left of the sequences
672
+ - 'right': pads on the right of the sequences
673
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
674
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
675
+ `>= 7.5` (Volta).
676
+ return_attention_mask:
677
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
678
+ """
679
+ # Load from model defaults
680
+ if return_attention_mask is None:
681
+ return_attention_mask = "attention_mask" in self.model_input_names
682
+
683
+ required_input = encoded_inputs[self.model_input_names[0]]
684
+
685
+ if padding_strategy == PaddingStrategy.LONGEST:
686
+ max_length = len(required_input)
687
+
688
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
689
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
690
+
691
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
692
+
693
+ # Initialize attention mask if not present.
694
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
695
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
696
+
697
+ if needs_to_be_padded:
698
+ difference = max_length - len(required_input)
699
+ if self.padding_side == "right":
700
+ if return_attention_mask:
701
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
702
+ if "token_type_ids" in encoded_inputs:
703
+ encoded_inputs["token_type_ids"] = (
704
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
705
+ )
706
+ if "bbox" in encoded_inputs:
707
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
708
+ if "labels" in encoded_inputs:
709
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
710
+ if "special_tokens_mask" in encoded_inputs:
711
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
712
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
713
+ elif self.padding_side == "left":
714
+ if return_attention_mask:
715
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
716
+ if "token_type_ids" in encoded_inputs:
717
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
718
+ "token_type_ids"
719
+ ]
720
+ if "bbox" in encoded_inputs:
721
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
722
+ if "labels" in encoded_inputs:
723
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
724
+ if "special_tokens_mask" in encoded_inputs:
725
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
726
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
727
+ else:
728
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
729
+
730
+ return encoded_inputs
731
+
732
+ def build_inputs_with_special_tokens(
733
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
734
+ ) -> List[int]:
735
+ """
736
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
737
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
738
+
739
+ - single sequence: `<s> X </s>`
740
+ - pair of sequences: `<s> A </s></s> B </s>`
741
+
742
+ Args:
743
+ token_ids_0 (`List[int]`):
744
+ List of IDs to which the special tokens will be added.
745
+ token_ids_1 (`List[int]`, *optional*):
746
+ Optional second list of IDs for sequence pairs.
747
+
748
+ Returns:
749
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
750
+ """
751
+
752
+ if token_ids_1 is None:
753
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
754
+ cls = [self.cls_token_id]
755
+ sep = [self.sep_token_id]
756
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
757
+
758
+ def create_token_type_ids_from_sequences(
759
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
760
+ ) -> List[int]:
761
+ """
762
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
763
+ not make use of token type ids, therefore a list of zeros is returned.
764
+
765
+ Args:
766
+ token_ids_0 (`List[int]`):
767
+ List of IDs.
768
+ token_ids_1 (`List[int]`, *optional*):
769
+ Optional second list of IDs for sequence pairs.
770
+
771
+ Returns:
772
+ `List[int]`: List of zeros.
773
+
774
+ """
775
+
776
+ sep = [self.sep_token_id]
777
+ cls = [self.cls_token_id]
778
+
779
+ if token_ids_1 is None:
780
+ return len(cls + token_ids_0 + sep) * [0]
781
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
782
+
783
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
784
+ if not self.can_save_slow_tokenizer:
785
+ raise ValueError(
786
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
787
+ "tokenizer."
788
+ )
789
+
790
+ if not os.path.isdir(save_directory):
791
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
792
+ return
793
+ out_vocab_file = os.path.join(
794
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
795
+ )
796
+
797
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
798
+ copyfile(self.vocab_file, out_vocab_file)
799
+
800
+ return (out_vocab_file,)
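
To tie the `__call__` signature above together, here is a hedged usage sketch for a single pretokenized example with word-level boxes and labels. It assumes `transformers` is installed and that the `microsoft/layoutxlm-base` checkpoint is available; the words, boxes and label ids are made up for illustration:

from transformers import LayoutXLMTokenizerFast

tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")

words = ["Invoice", "number", "12345"]
boxes = [[48, 84, 156, 98], [160, 84, 244, 98], [248, 84, 312, 98]]  # normalized to 0-1000
word_labels = [1, 1, 2]

encoding = tokenizer(
    words,
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    truncation=True,
    max_length=16,
)

# input_ids, attention_mask, bbox and labels come back as aligned lists of length 16;
# special and padding tokens receive cls_token_box / sep_token_box / pad_token_box and
# the pad_token_label (-100), as implemented in _batch_encode_plus above.
print(list(encoding.keys()))
print(encoding["bbox"][:4])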
llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py ADDED
@@ -0,0 +1,130 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig"],
29
+ "tokenization_mpnet": ["MPNetTokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_mpnet_fast"] = ["MPNetTokenizerFast"]
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_mpnet"] = [
47
+ "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "MPNetForMaskedLM",
49
+ "MPNetForMultipleChoice",
50
+ "MPNetForQuestionAnswering",
51
+ "MPNetForSequenceClassification",
52
+ "MPNetForTokenClassification",
53
+ "MPNetLayer",
54
+ "MPNetModel",
55
+ "MPNetPreTrainedModel",
56
+ ]
57
+
58
+ try:
59
+ if not is_tf_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ _import_structure["modeling_tf_mpnet"] = [
65
+ "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
66
+ "TFMPNetEmbeddings",
67
+ "TFMPNetForMaskedLM",
68
+ "TFMPNetForMultipleChoice",
69
+ "TFMPNetForQuestionAnswering",
70
+ "TFMPNetForSequenceClassification",
71
+ "TFMPNetForTokenClassification",
72
+ "TFMPNetMainLayer",
73
+ "TFMPNetModel",
74
+ "TFMPNetPreTrainedModel",
75
+ ]
76
+
77
+
78
+ if TYPE_CHECKING:
79
+ from .configuration_mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig
80
+ from .tokenization_mpnet import MPNetTokenizer
81
+
82
+ try:
83
+ if not is_tokenizers_available():
84
+ raise OptionalDependencyNotAvailable()
85
+ except OptionalDependencyNotAvailable:
86
+ pass
87
+ else:
88
+ from .tokenization_mpnet_fast import MPNetTokenizerFast
89
+
90
+ try:
91
+ if not is_torch_available():
92
+ raise OptionalDependencyNotAvailable()
93
+ except OptionalDependencyNotAvailable:
94
+ pass
95
+ else:
96
+ from .modeling_mpnet import (
97
+ MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
98
+ MPNetForMaskedLM,
99
+ MPNetForMultipleChoice,
100
+ MPNetForQuestionAnswering,
101
+ MPNetForSequenceClassification,
102
+ MPNetForTokenClassification,
103
+ MPNetLayer,
104
+ MPNetModel,
105
+ MPNetPreTrainedModel,
106
+ )
107
+
108
+ try:
109
+ if not is_tf_available():
110
+ raise OptionalDependencyNotAvailable()
111
+ except OptionalDependencyNotAvailable:
112
+ pass
113
+ else:
114
+ from .modeling_tf_mpnet import (
115
+ TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
116
+ TFMPNetEmbeddings,
117
+ TFMPNetForMaskedLM,
118
+ TFMPNetForMultipleChoice,
119
+ TFMPNetForQuestionAnswering,
120
+ TFMPNetForSequenceClassification,
121
+ TFMPNetForTokenClassification,
122
+ TFMPNetMainLayer,
123
+ TFMPNetModel,
124
+ TFMPNetPreTrainedModel,
125
+ )
126
+
127
+ else:
128
+ import sys
129
+
130
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
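
The `_LazyModule` indirection above means importing this subpackage is cheap: the framework-specific modules are only loaded when one of their attributes is first accessed, while the `TYPE_CHECKING` branch keeps static type checkers happy. A small usage-level sketch (it assumes `transformers` and `torch` are installed; the printed value is the config default):

import transformers.models.mpnet as mpnet

config = mpnet.MPNetConfig()      # first access triggers the configuration_mpnet import
model = mpnet.MPNetModel(config)  # first access triggers the torch-backed modeling_mpnet import
print(model.config.hidden_size)   # 768 by default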
llmeval-env/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc ADDED
Binary file (4.65 kB).